| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86-54.5k) | int64 (0-371) | string (lengths 87-49.2k) | int64 (0-349) | int64 (0-1) |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( numa : int , numb : int ) -> bool:
    """Return True if the two integers have opposite signs.

    XOR of two ints is negative exactly when the sign bits differ.

    >>> SCREAMING_SNAKE_CASE__(1, -1)
    True
    >>> SCREAMING_SNAKE_CASE__(1, 1)
    False
    """
    return numa ^ numb < 0
if __name__ == "__main__":
    import doctest
    doctest.testmod()

'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
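# A hedged usage sketch, assuming the class above mirrors transformers'
# Data2VecAudioConfig (the obfuscated dump itself is not runnable, so the
# example is kept in comments):
#
#     from transformers import Data2VecAudioConfig
#
#     config = Data2VecAudioConfig()
#     # inputs_to_logits_ratio is math.prod(conv_stride):
#     # 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320 input samples per logit frame
#     print(config.inputs_to_logits_ratio)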
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=1_0 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : int=3_2 * 4 , UpperCAmelCase_ : List[Any]=3_2 * 6 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : List[Any]=3_2 , ):
"""simple docstring"""
a : Optional[int] = parent
a : Optional[Any] = batch_size
a : Tuple = is_training
a : str = use_auxiliary_loss
a : Any = num_queries
a : Dict = num_channels
a : Any = min_size
a : str = max_size
a : List[str] = num_labels
a : int = mask_feature_size
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
UpperCAmelCase_)
a : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase_)
a : Dict = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase_) > 0.5
).float()
a : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase_) > 0.5).long()
a : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = self.prepare_config_and_inputs()
a : Optional[int] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : List[str] = output.encoder_hidden_states
a : Optional[int] = output.pixel_decoder_hidden_states
a : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCAmelCase_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCAmelCase_) , config.decoder_config.decoder_layers)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=False):
"""simple docstring"""
with torch.no_grad():
a : Tuple = MaskFormerModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_)
a : Dict = model(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_)
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = MaskFormerForInstanceSegmentation(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
def comm_check_on_output(UpperCAmelCase_ : List[Any]):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
a : Tuple = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_)
a : Any = model(UpperCAmelCase_)
comm_check_on_output(UpperCAmelCase_)
a : Optional[int] = model(
pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_)
comm_check_on_output(UpperCAmelCase_)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A : Optional[int] = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A : int = False
A : Tuple = False
A : List[str] = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = MaskFormerModelTester(self)
a : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase_)
@unittest.skip(reason='MaskFormer does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer is not a generative model')
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not use token embeddings')
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : int = model_class(UpperCAmelCase_)
a : Any = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
a : Optional[int] = MaskFormerModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Optional[Any] = (self.model_tester.min_size,) * 2
a : int = {
'pixel_values': torch.randn((2, 3, *size) , device=UpperCAmelCase_),
'mask_labels': torch.randn((2, 1_0, *size) , device=UpperCAmelCase_),
'class_labels': torch.zeros(2 , 1_0 , device=UpperCAmelCase_).long(),
}
a : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(UpperCAmelCase_)
a : Optional[int] = model(**UpperCAmelCase_)
self.assertTrue(outputs.loss is not None)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : int = model_class(UpperCAmelCase_).to(UpperCAmelCase_)
a : List[str] = model(**UpperCAmelCase_ , output_attentions=UpperCAmelCase_)
self.assertTrue(outputs.attentions is not None)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
a : Union[str, Any] = self.all_model_classes[1]
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
a : Union[str, Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
a : Optional[int] = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Union[str, Any] = self.all_model_classes[1]
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
a : int = True
a : Any = True
a : int = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
a : List[str] = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_)
a : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
a : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a : int = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase_)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
UpperCamelCase : str = 1E-4
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(UpperCAmelCase_)
a : Tuple = self.default_image_processor
a : Optional[int] = prepare_img()
a : int = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
a : Dict = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
a : Optional[Any] = model(**UpperCAmelCase_)
a : Optional[int] = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]]).to(UpperCAmelCase_)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
a : Dict = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]]).to(UpperCAmelCase_)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
a : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]]).to(UpperCAmelCase_)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(UpperCAmelCase_)
.eval()
)
a : Dict = self.default_image_processor
a : Optional[Any] = prepare_img()
a : List[Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
a : List[str] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
a : str = model(**UpperCAmelCase_)
# masks_queries_logits
a : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a : Dict = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
a : Any = torch.tensor(UpperCAmelCase_).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
# class_queries_logits
a : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
a : Dict = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
.to(UpperCAmelCase_)
.eval()
)
a : List[Any] = self.default_image_processor
a : str = prepare_img()
a : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
a : Optional[int] = model(**UpperCAmelCase_)
# masks_queries_logits
a : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a : str = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
a : Dict = torch.tensor(UpperCAmelCase_).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
# class_queries_logits
a : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
a : List[str] = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(UpperCAmelCase_)
.eval()
)
a : Optional[Any] = self.default_image_processor
a : Dict = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))] , segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.floataa), np.zeros((3_8_4, 3_8_4)).astype(np.floataa)] , return_tensors='pt' , )
a : List[str] = inputs['pixel_values'].to(UpperCAmelCase_)
a : int = [el.to(UpperCAmelCase_) for el in inputs['mask_labels']]
a : Tuple = [el.to(UpperCAmelCase_) for el in inputs['class_labels']]
with torch.no_grad():
a : Any = model(**UpperCAmelCase_)
self.assertTrue(outputs.loss is not None)
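# A hedged inference sketch, assuming the public transformers API these tests
# target ("image" is a placeholder PIL image):
#
#     import torch
#     from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
#
#     processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#     model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     # masks_queries_logits has 1/4 of the padded spatial size,
#     # e.g. (1, num_queries, 800 // 4, 1088 // 4) for the COCO test image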
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
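# A hedged construction sketch, assuming this mirrors MaskedBertConfig from
# transformers' movement-pruning research example (un-obfuscated names):
#
#     config = MaskedBertConfig(
#         pruning_method="topK",  # which score-based pruning strategy to use
#         mask_init="constant",   # how the learned mask scores are initialized
#         mask_scale=0.0,
#     )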
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : List[str]=4_0_0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[Any]=1 / 2_5_5 , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
a : Union[str, Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
a : Any = parent
a : Optional[Any] = batch_size
a : Optional[int] = num_channels
a : Union[str, Any] = min_resolution
a : Union[str, Any] = max_resolution
a : Any = do_resize
a : Any = size
a : Any = do_normalize
a : List[str] = image_mean
a : Optional[int] = image_std
a : str = do_rescale
a : Tuple = rescale_factor
a : List[Any] = do_pad
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False):
"""simple docstring"""
if not batched:
a : Any = image_inputs[0]
if isinstance(UpperCAmelCase_ , Image.Image):
a : Optional[Any] = image.size
else:
a : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
a : Dict = int(self.size['shortest_edge'] * h / w)
a : Optional[Any] = self.size['shortest_edge']
elif w > h:
a : str = self.size['shortest_edge']
a : Optional[Any] = int(self.size['shortest_edge'] * w / h)
else:
a : Dict = self.size['shortest_edge']
a : List[Any] = self.size['shortest_edge']
else:
a : Any = []
for image in image_inputs:
a : int = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
a : Optional[Any] = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_: item[0])[0]
a : List[str] = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_: item[1])[1]
return expected_height, expected_width
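# Example of the shortest-edge resizing reproduced above: a 30x40 (w x h)
# input with shortest_edge=18 maps to height int(18 * 40 / 30) == 24, width 18.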
@require_torch
@require_vision
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : int = ConditionalDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = ConditionalDetrImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , 'image_mean'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'image_std'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_normalize'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'size'))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3})
self.assertEqual(image_processor.do_pad , UpperCAmelCase_)
a : List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=UpperCAmelCase_)
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4})
self.assertEqual(image_processor.do_pad , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image)
# Test not batched input
a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a : str = self.image_processor_tester.get_expected_values(UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a : str = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_)
a : Any = image_processing(UpperCAmelCase_ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Any = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
# Test not batched input
a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a : Tuple = self.image_processor_tester.get_expected_values(UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a : Tuple = image_processing(UpperCAmelCase_ , return_tensors='pt').pixel_values
a : int = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
# Test not batched input
a : Tuple = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a : Optional[int] = self.image_processor_tester.get_expected_values(UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a : List[Any] = image_processing(UpperCAmelCase_ , return_tensors='pt').pixel_values
a : int = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
a : str = json.loads(f.read())
a : Any = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
a : Any = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
a : Union[str, Any] = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors='pt')
# verify pixel values
a : str = torch.Size([1, 3, 8_0_0, 1_0_6_6])
self.assertEqual(encoding['pixel_values'].shape , UpperCAmelCase_)
a : Optional[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCAmelCase_ , atol=1e-4))
# verify area
a : Union[str, Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCAmelCase_))
# verify boxes
a : int = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCAmelCase_ , atol=1e-3))
# verify image_id
a : Optional[int] = torch.tensor([3_9_7_6_9])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCAmelCase_))
# verify is_crowd
a : str = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCAmelCase_))
# verify class_labels
a : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCAmelCase_))
# verify orig_size
a : Dict = torch.tensor([4_8_0, 6_4_0])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCAmelCase_))
# verify size
a : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCAmelCase_))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
a : Tuple = json.loads(f.read())
a : List[str] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
a : Optional[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
a : Dict = ConditionalDetrImageProcessor(format='coco_panoptic')
a : List[str] = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors='pt')
# verify pixel values
a : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
self.assertEqual(encoding['pixel_values'].shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCAmelCase_ , atol=1e-4))
# verify area
a : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCAmelCase_))
# verify boxes
a : int = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCAmelCase_)
a : Optional[int] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCAmelCase_ , atol=1e-3))
# verify image_id
a : str = torch.tensor([3_9_7_6_9])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCAmelCase_))
# verify is_crowd
a : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCAmelCase_))
# verify class_labels
a : Dict = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCAmelCase_))
# verify masks
a : str = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCAmelCase_)
# verify orig_size
a : List[Any] = torch.tensor([4_8_0, 6_4_0])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCAmelCase_))
# verify size
a : List[str] = torch.tensor([8_0_0, 1_0_6_6])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCAmelCase_))
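# A hedged encoding sketch, assuming the public transformers API these tests
# target ("image" and "target" are placeholders):
#
#     from transformers import ConditionalDetrImageProcessor
#
#     processor = ConditionalDetrImageProcessor.from_pretrained(
#         "microsoft/conditional-detr-resnet-50"
#     )
#     encoding = processor(images=image, annotations=target, return_tensors="pt")
#     encoding["pixel_values"].shape  # e.g. torch.Size([1, 3, 800, 1066])
#     encoding["labels"][0]["boxes"]  # normalized (center_x, center_y, w, h)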
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
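# A hedged usage sketch: this subclass implements multi-vector textual
# inversion, where a placeholder added with num_vec_per_token=3 is registered
# as "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2" and expanded on the fly. Method
# names are assumed from the diffusers research example this appears to mirror:
#
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=3)
#     ids = tokenizer("a photo of <cat-toy>")["input_ids"]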
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=a_ )
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
A : ClassVar[Features] = Features({"audio": Audio()} )
A : ClassVar[Features] = Features({"transcription": Value("string" )} )
A : str = "audio"
A : str = "transcription"
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : int):
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""")
if not isinstance(features[self.audio_column] , UpperCAmelCase_):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""")
a : Dict = copy.deepcopy(self)
a : str = self.input_schema.copy()
a : str = features[self.audio_column]
a : int = input_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
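# Example of the name parsing above: "swin_tiny_patch4_window7_224" yields
# embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_size=7,
# image_size=224 and 1000 labels (21841 when the name contains "in22k").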
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
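# Example of the key renaming above:
#   "layers.0.blocks.0.attn.proj.weight"
#     -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"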
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
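# A hedged invocation example (the script file name is an assumption, not
# taken from this dump):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224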
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class UpperCamelCase ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
super().__init__()
a : str = model
a : Tuple = 2
a : Any = nn.Linear(self.model.config.hidden_size , self.num_labels)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str , snake_case : str ) -> int:
"""simple docstring"""
a : Optional[Any] = LongformerModel.from_pretrained(snake_case )
a : List[Any] = LightningModel(snake_case )
a : str = torch.load(snake_case , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
a : str = LongformerForQuestionAnswering.from_pretrained(snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(snake_case )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase : int = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
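# A hedged invocation example (the script file name is an assumption, not
# taken from this dump):
#   python convert_longformer_qa_checkpoint_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa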
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
a : Union[str, Any] = baseaa.urlsafe_baadecode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
if self.is_zeroa() or self.is_zeroa():
a : Tuple = set(['cpu', 'nvme'])
a : int = set(
[
self.get_value('zero_optimization.offload_optimizer.device'),
self.get_value('zero_optimization.offload_param.device'),
])
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
a , a : int = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
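# A hedged sketch, assuming the first class above mirrors the
# HfDeepSpeedConfig-style wrapper: stage/offload detection on a plain dict:
#
#     cfg = HfDeepSpeedConfig({"zero_optimization": {
#         "stage": 3,
#         "offload_param": {"device": "cpu"},
#     }})
#     cfg.is_zero3()                            # True
#     cfg.is_offload()                          # True: "cpu" is in {"cpu", "nvme"}
#     cfg.get_value("zero_optimization.stage")  # 3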
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase : List[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
UpperCamelCase : Optional[int] = {
"""allenai/led-base-16384""": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
"""simple docstring"""
a : Tuple = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
a : Dict = bs[:]
a : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case )
cs.append(2**8 + n )
n += 1
a : int = [chr(snake_case ) for n in cs]
return dict(zip(snake_case , snake_case ) )
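# Example: bytes_to_unicode()[32] == "Ġ" (U+0120). The space byte is remapped
# to a printable stand-in so BPE merges never have to handle raw whitespace.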
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
a : str = set()
a : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a : Tuple = char
return pairs
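# Example: for word ("h", "e", "l", "l", "o") the function above (get_pairs in
# the un-obfuscated source) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.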
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]="replace" , UpperCAmelCase_ : Union[str, Any]="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Dict="</s>" , UpperCAmelCase_ : Tuple="<s>" , UpperCAmelCase_ : Optional[int]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Union[str, Any]="<mask>" , UpperCAmelCase_ : int=False , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
a : str = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
a : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
a : Dict = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
a : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
a : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
a : str = json.load(UpperCAmelCase_)
a : int = {v: k for k, v in self.encoder.items()}
a : List[Any] = errors # how to handle errors in decoding
a : Union[str, Any] = bytes_to_unicode()
a : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
a : List[Any] = merges_handle.read().split('\n')[1:-1]
a : Optional[Any] = [tuple(merge.split()) for merge in bpe_merges]
a : List[str] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Optional[int] = {}
a : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a : str = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return len(self.encoder)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
a : Optional[Any] = tuple(UpperCAmelCase_)
a : Tuple = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
a : Optional[Any] = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
a : Union[str, Any] = bigram
a : Optional[Any] = []
a : List[str] = 0
while i < len(UpperCAmelCase_):
try:
a : List[Any] = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
a : Union[str, Any] = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
a : Dict = tuple(UpperCAmelCase_)
a : int = new_word
if len(UpperCAmelCase_) == 1:
break
else:
a : Tuple = get_pairs(UpperCAmelCase_)
a : Any = ' '.join(UpperCAmelCase_)
a : Tuple = word
return word
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : List[str] = []
for token in re.findall(self.pat , UpperCAmelCase_):
a : int = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
return self.decoder.get(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ''.join(UpperCAmelCase_)
a : int = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a : Union[str, Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
a : int = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
a : Optional[Any] = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: UpperCAmelCase_[1]):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!')
a : Tuple = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
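    # The merges file written above is plain text: a "#version: 0.2" header followed
    # by one merge per line in rank order, e.g. (illustrative):
    #     #version: 0.2
    #     l o
    #     lo w
    # The warning fires if the stored ranks are not the consecutive integers 0..N-1.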
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a : List[Any] = [self.cls_token_id]
a : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
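    # Resulting layout (the RoBERTa/LED convention, with illustrative ids cls=0, sep=2):
    #     single sequence:  <s> A </s>            -> [0] + A + [2]
    #     sequence pair:    <s> A </s></s> B </s> -> [0] + A + [2, 2] + B + [2]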
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
a : Union[str, Any] = [self.sep_token_id]
a : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=False , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : List[Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
a : Dict = ' ' + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
a : Union[str, Any] = super()._pad(
encoded_inputs=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding_strategy=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
# Load from model defaults
if return_attention_mask is None:
a : Dict = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a : Any = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
a : Dict = len(encoded_inputs['global_attention_mask']) != len(UpperCAmelCase_)
if needs_to_be_padded:
a : Union[str, Any] = len(UpperCAmelCase_) - len(encoded_inputs['global_attention_mask'])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a : Optional[Any] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
a : Dict = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return encoded_inputs
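# A minimal, self-contained sketch of the padding rule above (illustrative names).
# `global_attention_mask` is padded with -1 rather than 0 because, for LED, 0 already
# means "local attention"; -1 marks padded positions that should not be attended at all:
def _pad_global_attention_sketch(mask: list, target_len: int, side: str = 'right') -> list:
    diff = target_len - len(mask)
    if diff <= 0:
        return mask
    return mask + [-1] * diff if side == 'right' else [-1] * diff + mask

assert _pad_global_attention_sketch([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _pad_global_attention_sketch([1, 0, 0], 5, side='left') == [-1, -1, 1, 0, 0]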
| 368 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
                # ^ Saving the features seems slow; worth investigating why and how to improve it.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
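# A minimal sketch of the cache-under-lock pattern used in __init__ above
# (illustrative names; `FileLock`, `os` and `torch` are already imported in this
# module). The lock guarantees the expensive featurization runs in at most one
# process, while the others block and then read the cached file:
def _load_or_build_cached_sketch(cache_file: str, build_fn):
    with FileLock(cache_file + '.lock'):
        if os.path.exists(cache_file):
            return torch.load(cache_file)
        features = build_fn()  # expensive step, executed at most once
        torch.save(features, cache_file)
        return features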
| 345 | 0 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Any = BertJapaneseTokenizer
A : List[Any] = False
A : Any = True
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
super().setUp()
a : Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Dict = 'こんにちは、世界。 \nこんばんは、世界。'
a : Any = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Dict = self.get_input_output_texts(UpperCAmelCase_)
a : str = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
a : str = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_)
return text, ids
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = self.tokenizer_class(self.vocab_file)
a : str = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。')
self.assertListEqual(UpperCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab')
self.assertIsNotNone(UpperCAmelCase_)
a : Optional[Any] = 'こんにちは、世界。\nこんばんは、世界。'
a : Any = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
a : Union[str, Any] = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(UpperCAmelCase_ , 'wb') as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
with open(UpperCAmelCase_ , 'rb') as handle:
a : Optional[int] = pickle.load(UpperCAmelCase_)
a : List[str] = tokenizer_new.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : int = MecabTokenizer(mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
try:
a : List[Any] = MecabTokenizer(mecab_dic='unidic_lite')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
try:
a : List[Any] = MecabTokenizer(mecab_dic='unidic')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Optional[Any] = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
try:
a : Any = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic')
except RuntimeError:
            # if the dictionary does not exist on the system, the constructor above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Optional[int] = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi')
self.assertIsNotNone(UpperCAmelCase_)
a : List[str] = 'こんにちは、世界。\nこんばんは、世界。'
a : Any = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
a : int = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(UpperCAmelCase_ , 'wb') as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
with open(UpperCAmelCase_ , 'rb') as handle:
a : Optional[int] = pickle.load(UpperCAmelCase_)
a : Tuple = tokenizer_new.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Tuple = SudachiTokenizer(sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : int = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国', '人', '参政', '権'])
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国人', '参政権'])
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国人参政権'])
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Dict = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[Any] = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp')
self.assertIsNotNone(UpperCAmelCase_)
a : List[Any] = 'こんにちは、世界。\nこんばんは、世界。'
a : int = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
a : str = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(UpperCAmelCase_ , 'wb') as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
with open(UpperCAmelCase_ , 'rb') as handle:
a : Optional[Any] = pickle.load(UpperCAmelCase_)
a : Union[str, Any] = tokenizer_new.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[str] = JumanppTokenizer(do_lower_case=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Any = JumanppTokenizer(normalize_text=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = JumanppTokenizer(trim_whitespace=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。') , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
a : Tuple = {}
for i, token in enumerate(UpperCAmelCase_):
a : List[str] = i
a : int = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('こんにちは') , ['こんにちは'])
self.assertListEqual(tokenizer.tokenize('こんばんは') , ['こん', '##ばんは'])
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは') , ['こん', '##ばんは', '[UNK]', 'こんにちは'])
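    # A sketch of the greedy longest-match-first WordPiece algorithm exercised above
    # (illustrative; continuation pieces carry the "##" prefix, and unknown words
    # collapse to "[UNK]"):
    #     def wordpiece(word, vocab):
    #         pieces, start = [], 0
    #         while start < len(word):
    #             end = len(word)
    #             while end > start:
    #                 piece = word[start:end] if start == 0 else '##' + word[start:end]
    #                 if piece in vocab:
    #                     pieces.append(piece)
    #                     break
    #                 end -= 1
    #             else:
    #                 return ['[UNK]']  # no prefix matched at this position
    #             start = end
    #         return pieces
    #     # wordpiece('こんばんは', {'こん', '##ばんは'}) -> ['こん', '##ばんは']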
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Tuple = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp')
a : Union[str, Any] = tokenizer.subword_tokenizer
a : Any = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。')
self.assertListEqual(UpperCAmelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'])
a : Any = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは')
self.assertListEqual(UpperCAmelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'])
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : int = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese')
a : int = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase_)
a : Union[str, Any] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase_)
a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_)
a : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = BertJapaneseTokenizer
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
super().setUp()
a : int = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
a : Optional[Any] = 'こんにちは、世界。 \nこんばんは、世界。'
a : Optional[Any] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : str = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character')
a : Optional[int] = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。')
self.assertListEqual(
UpperCAmelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2])
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Tuple = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : Dict = {}
for i, token in enumerate(UpperCAmelCase_):
a : Optional[int] = i
a : List[Any] = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('こんにちは') , ['こ', 'ん', 'に', 'ち', 'は'])
self.assertListEqual(tokenizer.tokenize('こんにちほ') , ['こ', 'ん', 'に', 'ち', '[UNK]'])
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Tuple = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char')
a : int = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase_)
a : Optional[Any] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase_)
a : str = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_)
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = 'cl-tohoku/bert-base-japanese'
a : Tuple = AutoTokenizer.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING') as cm:
BertTokenizer.from_pretrained(UpperCAmelCase_)
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.'))
a : Dict = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING') as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_)
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.'))
| 369 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
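    # Sketch of the shortest-edge arithmetic performed by get_resize_output_image_size
    # above (illustrative numbers): the shorter side is scaled to `shortest_edge` and
    # the longer side keeps the aspect ratio, e.g. with shortest_edge=256:
    #     (480, 640) -> (256, 341)    scale = 256 / 480
    #     (640, 480) -> (341, 256)    scale = 256 / 480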
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
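    # Per-image arithmetic applied by the pipeline above (illustrative values):
    #     rescale:    pixel * (1 / 255)         maps uint8 [0, 255] -> float [0.0, 1.0]
    #     normalize:  (pixel - mean) / std      per channel, ImageNet mean/std by default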
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
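# A minimal, self-contained sketch of the post-processing above using numpy only
# (illustrative shapes; `np` is already imported in this module): logits of shape
# (num_labels, H, W) become an (H, W) map of per-pixel label ids via an argmax over
# the label axis; the torch path additionally resizes the logits bilinearly to each
# target size first.
_toy_logits = np.zeros((2, 2, 2))
_toy_logits[1, 0, 0] = 5.0  # label 1 wins only at pixel (0, 0)
assert np.argmax(_toy_logits, axis=0).tolist() == [[1, 0], [0, 0]]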
| 345 | 0 |
'''simple docstring'''
UpperCamelCase : Any = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
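# A minimal, self-contained sketch of the optional-dependency pattern repeated above
# (illustrative names): probe for the package, and substitute dummy objects that only
# raise a helpful error when actually used.
import importlib.util

def _is_available_sketch(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

assert isinstance(_is_available_sketch('json'), bool)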
| 370 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
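# A minimal, self-contained sketch of the same construction with unambiguous names
# (illustrative; the first term 1/1^p is printed as plain "1"):
def _p_series_sketch(nth_term: int, power: int) -> list:
    return ['1' if n == 0 else f'1 / {pow(n + 1, power)}' for n in range(nth_term)]

assert _p_series_sketch(4, 2) == ['1', '1 / 4', '1 / 9', '1 / 16']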
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCamelCase ( unittest.TestCase , a_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[int] = load_tool('text-classification')
self.tool.setup()
a : Optional[int] = load_tool('text-classification' , remote=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Union[str, Any] = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(UpperCAmelCase_ , 'positive')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(UpperCAmelCase_ , 'positive')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Optional[Any] = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(UpperCAmelCase_ , 'positive')
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : int = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(UpperCAmelCase_ , 'positive')
| 371 | '''simple docstring'''
import torch
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
if torch.cuda.is_available():
a : int = torch.cuda.device_count()
else:
a : Any = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 345 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
"""simple docstring"""
a : Union[str, Any] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
a : Optional[int] = Dataset.from_dict(snake_case )
return dataset
class UpperCamelCase ( a_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Optional[int] = get_dataset()
a : Dict = make_duplicate_clusters(UpperCAmelCase_ , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Tuple = get_dataset()
a : List[str] = deduplicate_dataset(UpperCAmelCase_)
self.assertEqual(len(UpperCAmelCase_) , 2)
print(UpperCAmelCase_)
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2)
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCAmelCase_)
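# A minimal, self-contained sketch of the duplicate-clustering idea tested above,
# using exact Jaccard similarity over token sets (the real implementation uses
# MinHash to approximate this at scale; names here are illustrative):
def _jaccard_sketch(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb) if sa | sb else 0.0

assert _jaccard_sketch('a ' * 20, 'a ' * 30) == 1.0  # same token set -> duplicates
assert _jaccard_sketch('a ' * 20, 'b ' * 7) == 0.0   # disjoint token sets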
| 350 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
| 345 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCamelCase : Dict = logging.getLogger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A : Optional[str] = field(
default=a_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A : Optional[str] = field(
default=a_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
A : Optional[str] = field(
default=a_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
A : bool = field(default=a_ , metadata={"help": "Whether tp freeze the encoder."} )
A : bool = field(default=a_ , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
A : Optional[int] = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
A : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
A : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
A : Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
A : Optional[str] = field(default=a_ , metadata={"help": "Source language id for translation."} )
A : Optional[str] = field(default=a_ , metadata={"help": "Target language id for translation."} )
A : Optional[int] = field(default=a_ , metadata={"help": "# num_beams to use for evaluation."} )
A : bool = field(
default=a_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : List[str] ) -> str:
"""simple docstring"""
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(snake_case , os.path.join(snake_case , F"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : int = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
check_output_dir(snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a : str = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(snake_case , snake_case , snake_case ):
assert hasattr(snake_case , snake_case ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case , snake_case , getattr(snake_case , snake_case ) )
a : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=snake_case , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
a : List[str] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case , snake_case ):
a : Tuple = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
a : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
a : Any = SeqaSeqDataset
# Get datasets
a : Any = (
dataset_class(
snake_case , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
a : int = (
dataset_class(
snake_case , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
a : int = (
dataset_class(
snake_case , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
a : Optional[Any] = (
build_compute_metrics_fn(data_args.task , snake_case ) if training_args.predict_with_generate else None
)
a : Optional[int] = SeqaSeqTrainer(
model=snake_case , args=snake_case , data_args=snake_case , train_dataset=snake_case , eval_dataset=snake_case , data_collator=SeqaSeqDataCollator(
snake_case , snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case , tokenizer=snake_case , )
a : str = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
a : Optional[int] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
a : int = train_result.metrics
a : int = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , snake_case , training_args.output_dir )
all_metrics.update(snake_case )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
a : Dict = trainer.evaluate(metric_key_prefix='val' )
a : str = data_args.n_val
a : Optional[Any] = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , snake_case , training_args.output_dir )
all_metrics.update(snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
a : List[Any] = trainer.predict(test_dataset=snake_case , metric_key_prefix='test' )
a : Dict = test_output.metrics
a : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
a : Tuple = round(metrics['test_loss'] , 4 )
handle_metrics('test' , snake_case , training_args.output_dir )
all_metrics.update(snake_case )
if training_args.predict_with_generate:
a : Optional[int] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case )
a : List[Any] = lmap(str.strip , snake_case )
write_txt_file(snake_case , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(snake_case , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> Dict:
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
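# Hedged usage sketch (not part of the original script; script name, checkpoint
# and paths are illustrative): with the standard HfArgumentParser CLI mapping of
# the dataclass fields above, a summarization run could look like:
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#       --data_dir ./cnn_dm --task summarization \
#       --output_dir ./output --do_train --do_eval \
#       --n_train 1000 --max_source_length 1024 --max_target_length 128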
| 351 | '''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """simple docstring"""
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
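# Hedged usage sketch (illustrative, not from the original file): `right` is
# inclusive, so the whole list is searched with right = len(nums) - 1.
#
#   nums = [3, 1, 4, 1, 5, 9, 2, 6]
#   assert find_max(nums, 0, len(nums) - 1) == 9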
| 345 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : int = 1_6 , UpperCAmelCase_ : int = 8_8 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 3_2 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = "geglu" , UpperCAmelCase_ : Optional[int] = None , ):
"""simple docstring"""
super().__init__()
a : Dict = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , num_layers=UpperCAmelCase_ , dropout=UpperCAmelCase_ , norm_num_groups=UpperCAmelCase_ , cross_attention_dim=UpperCAmelCase_ , attention_bias=UpperCAmelCase_ , sample_size=UpperCAmelCase_ , num_vector_embeds=UpperCAmelCase_ , activation_fn=UpperCAmelCase_ , num_embeds_ada_norm=UpperCAmelCase_ , )
for _ in range(2)
])
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
a : Dict = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
a : Optional[Any] = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
a : str = [1, 0]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : bool = True , ):
"""simple docstring"""
a : str = hidden_states
a : List[str] = []
a : Optional[int] = 0
# attention_mask is not used yet
for i in range(2):
# for each of the two transformers, pass the corresponding condition tokens
a : List[str] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
a : Any = self.transformer_index_for_condition[i]
a : Any = self.transformers[transformer_index](
UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , timestep=UpperCAmelCase_ , cross_attention_kwargs=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
encoded_states.append(encoded_state - input_states)
tokens_start += self.condition_lengths[i]
a : int = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
a : str = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCAmelCase_)
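# Hedged note on the blending above (algebra derived from the forward pass, not
# from external docs): with e_i = h_i - x the residuals of the two transformer
# branches and r = mix_ratio, the returned sample is
#
#   r * e_1 + (1 - r) * e_2 + x  ==  r * h_1 + (1 - r) * h_2
#
# i.e. a convex combination of the two branch outputs; the default r = 0.5
# simply averages them.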
| 352 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
a : List[str] = RegressionModel()
a : Union[str, Any] = deepcopy(snake_case )
a : Dict = RegressionDataset(length=snake_case )
a : Dict = DataLoader(snake_case , batch_size=snake_case )
model.to(accelerator.device )
a , a : Optional[int] = accelerator.prepare(snake_case , snake_case )
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
a : Any = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case : int ):
a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case )
return outputs
with accelerator.main_process_first():
a : Dict = dataset.map(
snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
a : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case : Optional[Any] ):
if use_longest:
return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case )
a : List[str] = get_dataloader(snake_case , not dispatch_batches )
a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case )
a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
a : Dict = []
for batch in dataloader:
a , a : Any = batch.values()
with torch.no_grad():
a : Tuple = model(snake_case )
a , a : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case )
targs.append(snake_case )
a , a : Any = torch.cat(snake_case ), torch.cat(snake_case )
return logits, targs
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]:
"""simple docstring"""
a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case )
a , a : int = generate_predictions(snake_case , snake_case , snake_case )
assert (
len(snake_case ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}"""
def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]:
"""simple docstring"""
a : int = evaluate.load('glue' , 'mrpc' )
a , a : Tuple = get_mrpc_setup(snake_case , snake_case )
# First do baseline
a , a , a : Tuple = setup['no']
model.to(snake_case )
model.eval()
for batch in dataloader:
batch.to(snake_case )
with torch.inference_mode():
a : List[Any] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case , references=batch['labels'] )
a : Tuple = metric.compute()
# Then do distributed
a , a , a : Tuple = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
a : List[str] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
a : Optional[int] = batch['labels']
a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case , references=snake_case )
a : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
    a : Dict = Accelerator(split_batches=False , dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches , split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
a : Optional[Any] = Accelerator()
test_torch_metrics(snake_case , 512 )
accelerator.state._reset_state()
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
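# Hedged sketch of the pattern exercised above (minimal, variable names assumed):
#
#   accelerator = Accelerator()
#   model, dataloader = accelerator.prepare(model, dataloader)
#   for batch in dataloader:
#       with torch.no_grad():
#           logits = model(batch["x"])
#       # gather_for_metrics drops the duplicate samples that even sharding adds,
#       # so exactly len(dataset) predictions are collected across processes.
#       all_logits, all_targets = accelerator.gather_for_metrics((logits, batch["y"]))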
| 345 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "M-CLIP"
def __init__( self : Dict , UpperCAmelCase_ : Any=1_0_2_4 , UpperCAmelCase_ : List[Any]=7_6_8 , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : int = transformerDimSize
a : str = imageDimSize
super().__init__(**UpperCAmelCase_)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Union[str, Any] = MCLIPConfig
def __init__( self : Optional[Any] , UpperCAmelCase_ : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
a : Optional[int] = XLMRobertaModel(UpperCAmelCase_)
a : Any = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
a : Optional[Any] = self.transformer(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)[0]
a : List[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
return self.LinearTransformation(UpperCAmelCase_), embs
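# Hedged usage sketch (checkpoint id is an assumption, not from this file): the
# forward above mean-pools the XLM-R token states with the attention mask before
# the linear projection and returns (projected_embeddings, raw_token_states).
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-L-14")
#   batch = tok(["a photo of a cat"], padding=True, return_tensors="pt")
#   projected, states = model(batch["input_ids"], batch["attention_mask"])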
| 353 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = ["vqvae"]
def __init__( self : List[str] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Mel , UpperCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , mel=UpperCAmelCase_ , vqvae=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
        return 5_0 if isinstance(self.scheduler , DDIMScheduler) else 1_0_0_0
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = None , UpperCAmelCase_ : np.ndarray = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
a : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
a : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
a : Tuple = noise
a : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_)
a : List[Any] = self.mel.audio_slice_to_image(UpperCAmelCase_)
a : str = np.frombuffer(input_image.tobytes() , dtype='uint8').reshape(
(input_image.height, input_image.width))
a : List[str] = (input_image / 2_5_5) * 2 - 1
a : Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
a : List[Any] = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0)).latent_dist.sample(
generator=UpperCAmelCase_)[0]
a : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a : Union[str, Any] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1])
a : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a : List[Any] = int(mask_start_secs * pixels_per_second)
a : Optional[Any] = int(mask_end_secs * pixels_per_second)
a : Optional[int] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet , UNetaDConditionModel):
a : Dict = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)['sample']
else:
a : str = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
            if isinstance(self.scheduler , DDIMScheduler):
a : List[Any] = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
else:
a : Any = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
a : str = mask[:, step, :, :mask_start]
if mask_end > 0:
a : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
a : List[str] = 1 / self.vqvae.config.scaling_factor * images
a : str = self.vqvae.decode(UpperCAmelCase_)['sample']
a : Tuple = (images / 2 + 0.5).clamp(0 , 1)
a : Any = images.cpu().permute(0 , 2 , 3 , 1).numpy()
a : List[str] = (images * 2_5_5).round().astype('uint8')
        a : Tuple = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB').convert('L') for _ in images))
a : List[str] = [self.mel.image_to_audio(UpperCAmelCase_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_)[:, np.newaxis, :]) , **ImagePipelineOutput(UpperCAmelCase_))
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Image.Image] , UpperCAmelCase_ : int = 5_0):
"""simple docstring"""
        assert isinstance(self.scheduler , DDIMScheduler)
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Dict = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8').reshape((1, image.height, image.width)) for image in images])
a : Tuple = (sample / 2_5_5) * 2 - 1
a : int = torch.Tensor(UpperCAmelCase_).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
a : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a : Optional[Any] = self.scheduler.alphas_cumprod[t]
a : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a : List[str] = 1 - alpha_prod_t
a : Optional[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
a : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : float):
"""simple docstring"""
a : List[Any] = acos(torch.dot(torch.flatten(UpperCAmelCase_) , torch.flatten(UpperCAmelCase_)) / torch.norm(UpperCAmelCase_) / torch.norm(UpperCAmelCase_))
return sin((1 - alpha) * theta) * xa / sin(UpperCAmelCase_) + sin(alpha * theta) * xa / sin(UpperCAmelCase_)
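# Hedged note on the static helper above (illustrative; tensor shapes assumed):
# it spherically interpolates two equally-shaped noise tensors, which is how
# intermediate latents between two seeds are usually produced.
#
#   noise_a = torch.randn(1, 1, 256, 256)
#   noise_b = torch.randn(1, 1, 256, 256)
#   halfway = pipeline.slerp(noise_a, noise_b, 0.5)  # method name taken from the diffusers original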
| 345 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=UpperCAmelCase_ , )
assert hasattr(self , 'env')
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : str=1):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=UpperCAmelCase_ , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase_ , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
        TrainingJobAnalytics(UpperCAmelCase_).export_csv(f"""{self.env.test_path}/{UpperCAmelCase_}_metrics.csv""")
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : int = self.create_estimator()
# run training
estimator.fit()
# result dataframe
a : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
a : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
a : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
a : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , 'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , UpperCAmelCase_)
| 354 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=3_2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=3_7 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=2 , ):
"""simple docstring"""
a : Any = parent
a : Optional[int] = batch_size
a : str = image_size
a : str = patch_size
a : List[Any] = num_channels
a : Optional[int] = is_training
a : Dict = use_labels
a : Any = hidden_size
a : Optional[int] = num_hidden_layers
a : int = num_attention_heads
a : int = intermediate_size
a : Any = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : List[str] = scope
a : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : str = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.prepare_config_and_inputs()
        a , a , a : Tuple = config_and_inputs
a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
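# Hedged worked check for the DINO test above: ViT-S/8 at the 480x480 input size
# set on the image processor yields (480 / 8) ** 2 + 1 = 3601 tokens (patches
# plus [CLS]) with hidden size 384, matching the asserted shape (1, 3601, 384).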
| 345 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
UpperCamelCase : Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
UpperCamelCase : Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "whisper"
A : List[str] = ["past_key_values"]
A : Optional[Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple , UpperCAmelCase_ : List[str]=5_1_8_6_5 , UpperCAmelCase_ : Tuple=8_0 , UpperCAmelCase_ : Tuple=6 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Tuple=6 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=1_5_3_6 , UpperCAmelCase_ : Optional[Any]=1_5_3_6 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : Any=5_0_2_5_7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=2_5_6 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=1_5_0_0 , UpperCAmelCase_ : List[Any]=4_4_8 , UpperCAmelCase_ : Optional[int]=5_0_2_5_6 , UpperCAmelCase_ : Optional[int]=5_0_2_5_6 , UpperCAmelCase_ : List[Any]=5_0_2_5_6 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=[2_2_0, 5_0_2_5_6] , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : int=2_5_6 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : str=0.05 , UpperCAmelCase_ : str=1_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : List[str]=1_0 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Any=7 , **UpperCAmelCase_ : int , ):
"""simple docstring"""
a : str = vocab_size
a : Optional[Any] = num_mel_bins
a : Dict = d_model
a : List[Any] = encoder_layers
a : Union[str, Any] = encoder_attention_heads
a : Optional[int] = decoder_layers
a : Dict = decoder_attention_heads
a : Dict = decoder_ffn_dim
a : Any = encoder_ffn_dim
a : Optional[int] = dropout
a : List[Any] = attention_dropout
a : Optional[int] = activation_dropout
a : Optional[int] = activation_function
a : Tuple = init_std
a : str = encoder_layerdrop
a : List[Any] = decoder_layerdrop
a : List[Any] = use_cache
a : Any = encoder_layers
a : str = scale_embedding # scale factor will be sqrt(d_model) if True
a : Union[str, Any] = max_source_positions
a : Tuple = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
a : Dict = classifier_proj_size
a : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : Union[str, Any] = apply_spec_augment
a : Optional[int] = mask_time_prob
a : str = mask_time_length
a : int = mask_time_min_masks
a : List[Any] = mask_feature_prob
a : Union[str, Any] = mask_feature_length
a : Dict = mask_feature_min_masks
a : List[str] = median_filter_width
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , suppress_tokens=UpperCAmelCase_ , begin_suppress_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
class UpperCamelCase ( a_ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
])
if self.use_past:
a : List[str] = {0: 'batch'}
else:
a : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs')
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 2_2_0_5_0 , UpperCAmelCase_ : float = 5.0 , UpperCAmelCase_ : int = 2_2_0 , ):
"""simple docstring"""
a : List[str] = OrderedDict()
a : str = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=UpperCAmelCase_ , framework=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , time_duration=UpperCAmelCase_ , frequency=UpperCAmelCase_ , )
a : int = encoder_inputs['input_features'].shape[2]
a : Optional[int] = encoder_sequence_length // 2 if self.use_past else seq_length
a : List[str] = super().generate_dummy_inputs(
preprocessor.tokenizer , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
a : Tuple = encoder_inputs.pop('input_features')
a : Any = decoder_inputs.pop('decoder_input_ids')
if "past_key_values" in decoder_inputs:
a : List[Any] = decoder_inputs.pop('past_key_values')
return dummy_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return 1e-3
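# Hedged usage sketch for the ONNX export config above (constructor signature
# assumed from the OnnxSeqaSeqConfigWithPast base class):
#
#   onnx_config = <this config class>(model_config)
#   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=1, seq_length=8)
#   # dummy carries input_features and decoder_input_ids; with use_past enabled
#   # the decoder length becomes encoder_sequence_length // 2 and past_key_values
#   # entries are added by fill_with_past_key_values_.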
| 355 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
    A : List[str] = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : int = outputs.cpu().detach().numpy()
a : str = 0
a : str = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta))
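# Hedged usage sketch (runtime wiring assumed; `image` is a PIL.Image):
#
#   tool = <this tool class>()
#   mask = tool(image, "cat")  # PipelineTool.__call__ chains encode -> forward -> decode
#   mask.save("cat_mask.png")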
| 345 | 0 |
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    """simple docstring"""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
UpperCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
UpperCamelCase : Dict = int(input("""Enter the number to be searched:\n"""))
UpperCamelCase : Any = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f'''Number {x} is at index {res}''')
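# Hedged non-interactive examples (jump search requires a sorted array):
#
#   assert jump_search([0, 1, 2, 8, 13, 21, 55], 55) == 6
#   assert jump_search([0, 1, 2, 8, 13, 21, 55], 7) == -1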
| 356 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
a : Tuple = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : List[Any] = {'unk_token': '<unk>'}
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = 'adapt react readapt apt'
a : int = 'adapt react readapt apt'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a : str = 'adapt react readapt apt'
a : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
a : List[Any] = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 345 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : int = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase ( a_ , a_ ):
"""simple docstring"""
A : int = "nat"
A : Union[str, Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Union[str, Any]=6_4 , UpperCAmelCase_ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase_ : Dict=[2, 4, 8, 1_6] , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Optional[Any]=3.0 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Optional[Any]=1e-5 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : Optional[Any] = patch_size
a : List[Any] = num_channels
a : str = embed_dim
a : Dict = depths
a : Optional[int] = len(UpperCAmelCase_)
a : List[str] = num_heads
a : Optional[Any] = kernel_size
a : Optional[Any] = mlp_ratio
a : List[str] = qkv_bias
a : str = hidden_dropout_prob
a : Tuple = attention_probs_dropout_prob
a : List[str] = drop_path_rate
a : Dict = hidden_act
a : List[Any] = layer_norm_eps
a : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a : Dict = int(embed_dim * 2 ** (len(UpperCAmelCase_) - 1))
a : Optional[Any] = layer_scale_init_value
a : Optional[int] = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_) + 1)]
a : Optional[Any] = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names)
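# Hedged worked check (defaults per the __init__ above): with embed_dim=64 and
# depths=[3, 4, 6, 5], hidden_size = 64 * 2 ** (4 - 1) = 512 after the last stage.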
| 357 | '''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( numa : int , numb : int ) -> bool:
    """simple docstring"""
    # The XOR of two integers is negative exactly when their sign bits differ.
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
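# Hedged examples for the helper above (values illustrative): the XOR of two
# integers is negative exactly when their sign bits differ.
#
#   (-1) ^ 5 == -6  ->  opposite signs, the helper returns True
#    3  ^ 7  ==  4  ->  same sign, the helper returns False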
| 345 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[Any] = torch.device("""cpu""")
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
a : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Tuple = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : int , snake_case : Dict ):
"""simple docstring"""
a : List[Any] = dct.pop(snake_case )
a : Dict = val
def SCREAMING_SNAKE_CASE__ ( snake_case : Any ):
"""simple docstring"""
a : Any = []
for k in state_dict.keys():
a : List[str] = k
if ".pwconv" in k:
a : Optional[int] = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
a : int = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
a : Any = k_new.split('.' )
if ls[2].isdigit():
a : Any = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Optional[int] ):
"""simple docstring"""
a : Optional[Any] = SwiftFormerConfig()
    # all SwiftFormer checkpoints are fine-tuned on ImageNet-1k; set up the 1000-class id2label mapping
a : Any = 1_000
a : Dict = 'huggingface/label-files'
a : Any = 'imagenet-1k-id2label.json'
a : str = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Dict = {int(snake_case ): v for k, v in idalabel.items()}
a : List[Any] = idalabel
a : List[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a : int = [3, 3, 6, 4]
a : List[Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a : str = [3, 3, 9, 6]
a : int = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a : List[str] = [4, 3, 10, 5]
a : int = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a : Any = [4, 4, 12, 6]
a : Optional[int] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a : Any = torch.hub.load_state_dict_from_url(snake_case , map_location='cpu' , check_hash=snake_case )
else:
a : List[str] = torch.load(snake_case , map_location='cpu' )
a : List[Any] = checkpoint
a : List[str] = create_rename_keys(snake_case )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
# load HuggingFace model
a : Union[str, Any] = SwiftFormerForImageClassification(snake_case ).eval()
hf_model.load_state_dict(snake_case )
# prepare test inputs
a : List[Any] = prepare_img()
a : Dict = ViTImageProcessor.from_pretrained('preprocessor_config' )
a : str = processor(images=snake_case , return_tensors='pt' )
# compare outputs from both models
a : Any = get_expected_output(snake_case )
a : str = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , snake_case , atol=1E-3 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
UpperCamelCase : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 358 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
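            # e.g. a requested shortest_edge of 224 becomes int(256 / 224 * 224) = 256 here; the later center crop then brings the image back to 224 x 224 (illustrative walkthrough)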
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
| 345 | 0 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
UpperCamelCase : Optional[int] = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 65_536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 65_536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 131_072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Union[str, Any] ) -> int:
return torch.atana(snake_case , snake_case ) / math.pi * 2
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[int]:
a : int = torch.sin(t * math.pi / 2 ) ** 2
a : Any = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(snake_case , snake_case )
class UpperCamelCase ( a_ ):
"""simple docstring"""
pass
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : int):
"""simple docstring"""
super().__init__()
a : int = DiffusionAttnUnetaD(UpperCAmelCase_ , n_attn_layers=4)
a : Union[str, Any] = deepcopy(self.diffusion)
a : Tuple = torch.quasirandom.SobolEngine(1 , scramble=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> List[str]:
a : Optional[Any] = MODELS_MAP[model_name]['url']
os.system(F"""wget {url} ./""" )
return F"""./{model_name}.ckpt"""
UpperCamelCase : Tuple = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
UpperCamelCase : str = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
UpperCamelCase : Optional[int] = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
UpperCamelCase : List[Any] = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
UpperCamelCase : Union[str, Any] = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
UpperCamelCase : List[str] = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> List[str]:
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> int:
for key, value in ATTN_MAP.items():
if name.startswith(snake_case ) and not isinstance(snake_case , snake_case ):
return name.replace(snake_case , snake_case )
elif name.startswith(snake_case ):
return [name.replace(snake_case , snake_case ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : Tuple=13 ) -> Any:
a : str = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
a : Dict = 0
if string.startswith('net.3.' ):
depth += 1
a : Any = string[6:]
elif string.startswith('net.' ):
a : Optional[int] = string[4:]
while string.startswith('main.7.' ):
depth += 1
a : str = string[7:]
if string.startswith('main.' ):
a : Optional[Any] = string[5:]
# mid block
if string[:2].isdigit():
a : Tuple = string[:2]
a : int = string[2:]
else:
a : Any = string[0]
a : str = string[1:]
if depth == max_depth:
a : str = MID_NUM_TO_LAYER[layer_num]
a : int = 'mid_block'
elif depth > 0 and int(snake_case ) < 7:
a : int = DOWN_NUM_TO_LAYER[layer_num]
a : Optional[Any] = F"""down_blocks.{depth}"""
elif depth > 0 and int(snake_case ) > 7:
a : Any = UP_NUM_TO_LAYER[layer_num]
a : Any = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
a : List[str] = DEPTH_0_TO_LAYER[layer_num]
a : List[Any] = F"""up_blocks.{max_depth - 1}""" if int(snake_case ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
a : Optional[int] = string_left[1:]
if "resnets" in new_layer:
a : int = convert_resconv_naming(snake_case )
elif "attentions" in new_layer:
a : Any = convert_attn_naming(snake_case )
a : int = new_string_left
if not isinstance(snake_case , snake_case ):
a : Tuple = prefix + '.' + new_layer + '.' + string_left
else:
a : List[Any] = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
a : int = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
a : List[Any] = rename(snake_case )
# check if we need to transform from Conv => Linear for attention
if isinstance(snake_case , snake_case ):
a : Any = transform_conv_attns(snake_case , snake_case , snake_case )
else:
a : Dict = v
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[int] ) -> str:
if len(snake_case ) == 1:
if len(v.shape ) == 3:
# weight
a : int = v[:, :, 0]
else:
# bias
a : List[str] = v
else:
# qkv matrices
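        # The fused qkv weight comes from a Conv1d of shape (3 * dim, dim, 1); the loop below splits it
        # into three equal (dim, dim) Linear weights -- query, key, value -- dropping the kernel axis.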
a : Any = v.shape[0]
a : Tuple = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
a : Tuple = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
a : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] ) -> Dict:
a : List[Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
a : Any = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
a : Any = download(snake_case )
a : List[str] = MODELS_MAP[model_name]['sample_rate']
a : Optional[int] = MODELS_MAP[model_name]['sample_size']
a : Union[str, Any] = Object()
a : Any = sample_size
a : Dict = sample_rate
a : Dict = 0
a : List[str] = UNetaDModel(sample_size=snake_case , sample_rate=snake_case )
a : Any = diffusers_model.state_dict()
a : Tuple = DiffusionUncond(snake_case )
orig_model.load_state_dict(torch.load(args.model_path , map_location=snake_case )['state_dict'] )
a : int = orig_model.diffusion_ema.eval()
a : Tuple = orig_model.state_dict()
a : Optional[Any] = rename_orig_weights(snake_case )
a : Tuple = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
a : Tuple = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(snake_case ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith('kernel' ) for k in list(snake_case ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
a : str = value.squeeze()
a : Optional[int] = value
diffusers_model.load_state_dict(snake_case )
a : Union[str, Any] = 100
a : Dict = 33
a : str = IPNDMScheduler(num_train_timesteps=snake_case )
a : List[Any] = torch.manual_seed(snake_case )
a : List[str] = torch.randn([1, 2, config.sample_size] , generator=snake_case ).to(snake_case )
a : int = torch.linspace(1 , 0 , steps + 1 , device=snake_case )[:-1]
a : Tuple = get_crash_schedule(snake_case )
a : Union[str, Any] = DanceDiffusionPipeline(unet=snake_case , scheduler=snake_case )
a : Optional[Any] = torch.manual_seed(33 )
a : Optional[Any] = pipe(num_inference_steps=snake_case , generator=snake_case ).audios
a : Union[str, Any] = sampling.iplms_sample(snake_case , snake_case , snake_case , {} )
a : Union[str, Any] = generated.clamp(-1 , 1 )
a : Tuple = (generated - audio).abs().sum()
a : str = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , snake_case )
print('Diff max' , snake_case )
assert diff_max < 1E-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
UpperCamelCase : int = parser.parse_args()
main(args)
| 359 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : float | Decimal , snake_case : float = 10**-10 ) -> float:
"""simple docstring"""
a : Dict = a
while True:
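        # Newton-Raphson update: x_{n+1} = x_n - f(x_n) / f'(x_n); sympy's diff supplies f'
        # symbolically and eval plugs in the current point.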
a : Any = Decimal(snake_case ) - (
Decimal(eval(snake_case ) ) / Decimal(eval(str(diff(snake_case ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(snake_case ) ) < precision: # noqa: S307
return float(snake_case )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of log(x) - 1 = 0 (the root is e)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 345 | 0 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
UpperCamelCase : Any = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
UpperCamelCase : int = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
UpperCamelCase : Any = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
UpperCamelCase : str = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : str):
"""simple docstring"""
import nltk
nltk.download('wordnet')
if NLTK_VERSION >= version.Version('3.6.5'):
nltk.download('punkt')
if NLTK_VERSION >= version.Version('3.6.6'):
nltk.download('omw-1.4')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Optional[int]=0.5):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5'):
a : Union[str, Any] = [
meteor_score.single_meteor_score(
word_tokenize(UpperCAmelCase_) , word_tokenize(UpperCAmelCase_) , alpha=UpperCAmelCase_ , beta=UpperCAmelCase_ , gamma=UpperCAmelCase_)
for ref, pred in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
else:
a : Optional[int] = [
meteor_score.single_meteor_score(UpperCAmelCase_ , UpperCAmelCase_ , alpha=UpperCAmelCase_ , beta=UpperCAmelCase_ , gamma=UpperCAmelCase_)
for ref, pred in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
return {"meteor": np.mean(UpperCAmelCase_)}
| 360 | '''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
        ( a , a , a , a , a , a , a , a , a ) : Union[str, Any] = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
| 345 | 0 |
'''simple docstring'''
import argparse
import datetime
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> str:
"""simple docstring"""
a : Tuple = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
a : List[Any] = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(snake_case ) < 11:
raise ValueError('Must be 10 characters long' )
# Get month
a : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('Month must be between 1 - 12' )
a : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get day
a : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('Date must be between 1 - 31' )
# Get second separator
a : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get year
a : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8_500:
raise ValueError(
'Year out of range. There has to be some sort of limit...right?' )
# Get datetime obj for validation
a : str = datetime.date(int(snake_case ) , int(snake_case ) , int(snake_case ) )
# Start math
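    # The lines below implement a Zeller-style congruence. Reading the obfuscated temporaries as
    # c = leading two digits of y and k = trailing two digits of y, the weekday index is (assumed reconstruction):
    #   f = (floor(2.6 * m - 5.39) + floor(c / 4) + floor(k / 4) + d + k - 2 * c) mod 7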
if m <= 2:
a : Any = y - 1
a : str = m + 12
# maths var
a : int = int(str(snake_case )[:2] )
a : int = int(str(snake_case )[2:] )
a : int = int(2.6 * m - 5.39 )
a : int = int(c / 4 )
a : int = int(k / 4 )
a : int = int(d + k )
a : int = int(t + u + v + x )
a : int = int(z - (2 * c) )
a : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
# Response
a : str = F"""Your date {date_input}, is a {days[str(snake_case )]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Any = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
UpperCamelCase : List[Any] = parser.parse_args()
zeller(args.date_input)
| 361 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
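    # _LazyModule replaces this module in sys.modules so the tokenizer imports declared above are deferred until an attribute is first accessed.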
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 362 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 0 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Optional[int] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Optional[Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : List[Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : str = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
a : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Union[str, Any] = 'fp16'
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
a : Optional[Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Optional[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
a : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[int] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Any = 'fp16'
        self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
| 363 | '''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
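# Illustrative note (not part of the original file): the property above returns the
# product of the convolutional strides, i.e. the overall downsampling factor of the
# feature extractor. With the default strides (5, 2, 2, 2, 2, 2, 2) this is
# 5 * 2**6 == 320, so each output frame covers 320 input samples (20 ms of audio
# at a 16 kHz sampling rate).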
| 345 | 0 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : int
A : float = 0.0
A : int = 1
A : int = 1
A : bool = True
A : bool = False
A : bool = False
A : bool = False
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = []
a : Optional[Any] = []
for i in range(self.num_layers):
a : int = self.in_channels if i == 0 else self.out_channels
a : Any = FlaxResnetBlockaD(
in_channels=UpperCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_)
a : Optional[int] = resnets
a : List[Any] = attentions
if self.add_downsample:
a : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int=True):
"""simple docstring"""
a : List[str] = ()
for resnet, attn in zip(self.resnets , self.attentions):
a : str = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
a : int = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
output_states += (hidden_states,)
if self.add_downsample:
a : Any = self.downsamplers_a(UpperCAmelCase_)
output_states += (hidden_states,)
return hidden_states, output_states
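# Note on the control flow above: every resnet/attention pair appends its output to
# `output_states`; these tensors are the skip connections that the corresponding
# up blocks later pop and concatenate back in, and the optional downsampler adds
# one more entry after halving the spatial resolution.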
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : int
A : float = 0.0
A : int = 1
A : bool = True
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = []
for i in range(self.num_layers):
a : Dict = self.in_channels if i == 0 else self.out_channels
a : Tuple = FlaxResnetBlockaD(
in_channels=UpperCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : Any = resnets
if self.add_downsample:
a : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str=True):
"""simple docstring"""
a : Dict = ()
for resnet in self.resnets:
a : Any = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
output_states += (hidden_states,)
if self.add_downsample:
a : Tuple = self.downsamplers_a(UpperCAmelCase_)
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : int
A : int
A : float = 0.0
A : int = 1
A : int = 1
A : bool = True
A : bool = False
A : bool = False
A : bool = False
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = []
a : str = []
for i in range(self.num_layers):
a : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
a : List[Any] = self.prev_output_channel if i == 0 else self.out_channels
a : List[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_)
a : int = resnets
a : Dict = attentions
if self.add_upsample:
a : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any]=True):
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions):
# pop res hidden states
a : Union[str, Any] = res_hidden_states_tuple[-1]
a : Tuple = res_hidden_states_tuple[:-1]
a : Tuple = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
a : Tuple = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
a : str = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
if self.add_upsample:
a : List[str] = self.upsamplers_a(UpperCAmelCase_)
return hidden_states
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : int
A : int
A : float = 0.0
A : int = 1
A : bool = True
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Tuple = []
for i in range(self.num_layers):
a : Any = self.in_channels if (i == self.num_layers - 1) else self.out_channels
a : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
a : Optional[int] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : int = resnets
if self.add_upsample:
a : Tuple = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=True):
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
a : Optional[Any] = res_hidden_states_tuple[-1]
a : Dict = res_hidden_states_tuple[:-1]
a : str = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
a : int = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
if self.add_upsample:
a : List[str] = self.upsamplers_a(UpperCAmelCase_)
return hidden_states
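# Self-contained sketch of the skip-connection merge performed by both up blocks
# above (shapes are assumptions; the Flax UNet works in NHWC layout):
#
#   import jax.numpy as jnp
#   hidden_states = jnp.zeros((1, 8, 8, 320))      # current decoder activations
#   res_hidden_states = jnp.zeros((1, 8, 8, 320))  # popped encoder skip tensor
#   merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
#   assert merged.shape == (1, 8, 8, 640)          # channels are stacked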
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : float = 0.0
A : int = 1
A : int = 1
A : bool = False
A : bool = False
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
a : Any = []
for _ in range(self.num_layers):
a : int = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_)
a : Tuple = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : Tuple = resnets
a : Tuple = attentions
def __call__( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=True):
"""simple docstring"""
a : Dict = self.resnets[0](UpperCAmelCase_ , UpperCAmelCase_)
for attn, resnet in zip(self.attentions , self.resnets[1:]):
a : Dict = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
a : int = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
return hidden_states
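# Structure of the mid block above: one initial resnet, followed by `num_layers`
# alternating (attention, resnet) pairs, which is why `self.resnets` holds one
# more module than `self.attentions`.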
| 364 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
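# Sketch of what the three extra fields above configure (my reading of the movement
# pruning setup this config comes from, not a statement made by the file itself):
# `pruning_method` selects the masking strategy (e.g. "topK" keeps the weights with
# the highest learned importance scores), `mask_init` chooses how the score
# matrices are initialized ("constant" here), and `mask_scale` is the value used
# for that initialization.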
| 345 | 0 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
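# Illustrative behavior, derived from the methods above (token names are made up):
# adding a placeholder "<cat>" with num_vec_per_token=3 registers the tokens
# "<cat>_0", "<cat>_1" and "<cat>_2", and replace_placeholder_tokens_in_text()
# rewrites every occurrence of "<cat>" in the input into the space-joined
# sequence "<cat>_0 <cat>_1 <cat>_2" (optionally shuffled when
# vector_shuffle=True) before the regular CLIP tokenizer runs.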
| 365 |
| 345 | 0 |
'''simple docstring'''
UpperCamelCase : Optional[int] = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 366 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
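# Worked example of the name parsing above (using a real timm checkpoint name):
# "swin_tiny_patch4_window7_224" splits into model_size="tiny", img_size=224 and
# window_size=7, which yields embed_dim=96, depths=(2, 2, 6, 2) and
# num_heads=(3, 6, 12, 24).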
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
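# Worked example, traced through the replacements above:
#   "layers.0.blocks.0.attn.proj.weight"
#   -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"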
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
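# Note on the qkv handling above: timm stores query, key and value as a single
# fused "qkv" tensor, so the slices assign the first `dim` rows to the query
# projection, the next `dim` rows to the key and the last `dim` rows to the
# value (and the same thirds of the fused bias vector).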
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 345 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
a : Tuple = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : List[Any] = {'unk_token': '<unk>'}
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = 'adapt react readapt apt'
a : int = 'adapt react readapt apt'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a : str = 'adapt react readapt apt'
a : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
a : List[Any] = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 367 | '''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
            # Don't modify the user's data in case they want to reuse it (e.g. in tests): once we have
            # modified it, it would not be accepted here again, since the `auto` values would have been overridden.
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
                a : Union[str, Any] = base64.urlsafe_b64decode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
if self.is_zeroa() or self.is_zeroa():
            a : Tuple = {'cpu', 'nvme'}
            a : int = {
                self.get_value('zero_optimization.offload_optimizer.device'),
                self.get_value('zero_optimization.offload_param.device'),
            }
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
        a , a = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
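    # Example of the dotted-key lookup above (config contents are made up): with
    # self.config == {"zero_optimization": {"stage": 3}}, calling
    # get_value("zero_optimization.stage") walks the nested dicts and returns 3,
    # while a missing key falls back to the `default` argument.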
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
        # This wrapper overrides the above calls with no-ops when Accelerate runs under
        # DeepSpeed, but allows normal functionality for non-DeepSpeed cases, thus enabling a
        # simple training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
| 345 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : Dict = {"""vocab_file""": """vocab.json"""}
UpperCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
UpperCamelCase : Any = {"""mgp-str""": 27}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int="[GO]" , UpperCAmelCase_ : int="[GO]" , UpperCAmelCase_ : Optional[int]="[s]" , UpperCAmelCase_ : Dict="[GO]" , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
super().__init__(
unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
a : Optional[int] = json.load(UpperCAmelCase_)
a : Any = {v: k for k, v in self.vocab.items()}
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return len(self.vocab)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : List[str] = []
for s in text:
char_tokens.extend(UpperCAmelCase_)
return char_tokens
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
return self.vocab.get(UpperCAmelCase_ , self.vocab.get(self.unk_token))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
return self.decoder.get(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_):
logger.error('Vocabulary path ({}) should be a directory'.format(UpperCAmelCase_))
return
a : Optional[int] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
return (vocab_file,)
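# The tokenizer above is character-level (as the per-character loop in _tokenize
# suggests): each character of the input string is looked up directly in the JSON
# vocabulary, and unknown characters fall back to the "[GO]" unk token.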
| 368 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            a , a = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
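# Note on the caching scheme above: the features are serialized with torch.save
# under a name of the form "cached_{split}_{TokenizerClass}_{max_seq_length}_{task}",
# and the FileLock around that file lets only the first distributed process build
# the features while the other processes wait and then load them from the cache.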
| 345 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Dict = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase : Optional[Any] = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase : int = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
UpperCamelCase : str = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = VOCAB_FILES_NAMES
A : Tuple = PRETRAINED_VOCAB_FILES_MAP
A : Any = PRETRAINED_INIT_CONFIGURATION
A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = RealmTokenizer
def __init__( self : int , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]="[UNK]" , UpperCAmelCase_ : Optional[int]="[SEP]" , UpperCAmelCase_ : Tuple="[PAD]" , UpperCAmelCase_ : Any="[CLS]" , UpperCAmelCase_ : Dict="[MASK]" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : str , ):
"""simple docstring"""
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , )
a : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' , UpperCAmelCase_) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase_) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase_) != tokenize_chinese_chars
):
a : List[str] = getattr(UpperCAmelCase_ , normalizer_state.pop('type'))
a : Tuple = do_lower_case
a : Optional[int] = strip_accents
a : List[Any] = tokenize_chinese_chars
a : str = normalizer_class(**UpperCAmelCase_)
a : int = do_lower_case
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : Optional[Any] = PaddingStrategy.MAX_LENGTH
a : Any = text
a : Tuple = kwargs.pop('text_pair' , UpperCAmelCase_)
a : Tuple = kwargs.pop('return_tensors' , UpperCAmelCase_)
a : List[str] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCAmelCase_):
if batch_text_pair is not None:
a : Union[str, Any] = batch_text_pair[idx]
else:
a : Dict = None
a : str = super().__call__(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
a : Dict = encoded_candidates.get('input_ids')
a : List[Any] = encoded_candidates.get('attention_mask')
a : Any = encoded_candidates.get('token_type_ids')
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCAmelCase_)
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCAmelCase_)
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCAmelCase_)
a : str = {key: item for key, item in output_data.items() if len(UpperCAmelCase_) != 0}
return BatchEncoding(UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
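    # Sketch of the intended call semantics above (my reading of the code, with
    # assumed shapes): the input is a batch where each element is itself a list of
    # candidate texts; every candidate list is encoded separately with max-length
    # padding so that, with return_tensors set, the per-key results stack into
    # [batch_size, num_candidates, max_length] tensors.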
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=None):
"""simple docstring"""
a : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
a : Dict = [self.sep_token_id]
a : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
a : str = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
| 369 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
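# Sketch of the expected output above (shapes assumed): when target_sizes is given,
# each entry of the returned list is a (height, width) map of per-pixel class ids,
# produced by bilinearly resizing that image's logits and taking the argmax over
# the class dimension; without target_sizes a single argmax over the raw logits
# is returned per image.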
| 345 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Any = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : str = "trocr"
A : Optional[int] = ["past_key_values"]
A : Optional[Any] = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : Dict , UpperCAmelCase_ : List[Any]=5_0_2_6_5 , UpperCAmelCase_ : List[str]=1_0_2_4 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : int=1_6 , UpperCAmelCase_ : List[Any]=4_0_9_6 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Dict=5_1_2 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Optional[int]=2 , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : List[str] = vocab_size
a : Optional[int] = d_model
a : Any = decoder_layers
a : str = decoder_attention_heads
a : int = decoder_ffn_dim
a : str = activation_function
a : str = max_position_embeddings
a : Dict = dropout
a : Any = attention_dropout
a : Tuple = activation_dropout
a : List[str] = init_std
a : Optional[Any] = decoder_layerdrop
a : Dict = use_cache
a : List[Any] = scale_embedding
a : Dict = use_learned_position_embeddings
a : Dict = layernorm_embedding
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
| 370 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
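# Example, derived from the loop above: nth_term=3 and power=2 produce
# ['1', '1 / 4', '1 / 9'], i.e. the first three terms of sum(1 / n**2).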
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345 | 0 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : int ) -> Optional[int]:
"""simple docstring"""
a : Optional[Any] = checkpoint
a : Any = {}
a : Dict = vae_state_dict['encoder.conv_in.weight']
a : str = vae_state_dict['encoder.conv_in.bias']
a : Union[str, Any] = vae_state_dict['encoder.conv_out.weight']
a : Dict = vae_state_dict['encoder.conv_out.bias']
a : Union[str, Any] = vae_state_dict['encoder.norm_out.weight']
a : Tuple = vae_state_dict['encoder.norm_out.bias']
a : Optional[Any] = vae_state_dict['decoder.conv_in.weight']
a : Optional[Any] = vae_state_dict['decoder.conv_in.bias']
a : int = vae_state_dict['decoder.conv_out.weight']
a : List[Any] = vae_state_dict['decoder.conv_out.bias']
a : Union[str, Any] = vae_state_dict['decoder.norm_out.weight']
a : List[str] = vae_state_dict['decoder.norm_out.bias']
a : Optional[Any] = vae_state_dict['quant_conv.weight']
a : str = vae_state_dict['quant_conv.bias']
a : Union[str, Any] = vae_state_dict['post_quant_conv.weight']
a : Union[str, Any] = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
a : str = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
a : Dict = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(snake_case )
}
# Retrieves the keys for the decoder up blocks only
a : Optional[int] = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
a : Union[str, Any] = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(snake_case )
}
for i in range(snake_case ):
a : Tuple = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
a : List[str] = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
a : Optional[int] = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
a : Optional[Any] = renew_vae_resnet_paths(snake_case )
a : Tuple = {'old': F"""down.{i}.block""", 'new': F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case )
a : Union[str, Any] = [key for key in vae_state_dict if 'encoder.mid.block' in key]
a : Tuple = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a : List[Any] = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
a : str = renew_vae_resnet_paths(snake_case )
a : Optional[Any] = {'old': F"""mid.block_{i}""", 'new': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case )
a : Any = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
a : List[str] = renew_vae_attention_paths(snake_case )
a : Any = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case )
conv_attn_to_linear(snake_case )
for i in range(snake_case ):
a : List[str] = num_up_blocks - 1 - i
a : List[str] = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
a : Union[str, Any] = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
a : Tuple = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
a : Dict = renew_vae_resnet_paths(snake_case )
a : Union[str, Any] = {'old': F"""up.{block_id}.block""", 'new': F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case )
a : Optional[int] = [key for key in vae_state_dict if 'decoder.mid.block' in key]
a : Dict = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a : Dict = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
a : List[str] = renew_vae_resnet_paths(snake_case )
a : List[str] = {'old': F"""mid.block_{i}""", 'new': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case )
a : str = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
a : Tuple = renew_vae_attention_paths(snake_case )
a : Optional[int] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case )
conv_attn_to_linear(snake_case )
return new_checkpoint
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str , ) -> str:
"""simple docstring"""
a : int = requests.get(
' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
a : List[Any] = io.BytesIO(r.content )
a : str = OmegaConf.load(snake_case )
a : int = 512
a : int = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
a : Optional[int] = {}
with safe_open(snake_case , framework='pt' , device='cpu' ) as f:
for key in f.keys():
a : Optional[Any] = f.get_tensor(snake_case )
else:
a : List[Any] = torch.load(snake_case , map_location=snake_case )['state_dict']
# Convert the VAE model.
a : Tuple = create_vae_diffusers_config(snake_case , image_size=snake_case )
a : Tuple = custom_convert_ldm_vae_checkpoint(snake_case , snake_case )
a : int = AutoencoderKL(**snake_case )
vae.load_state_dict(snake_case )
vae.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
UpperCamelCase : Union[str, Any] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
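# Editor's note: hedged invocation sketch (editorial addition); the script
# filename and paths below are placeholders, not values from the source:
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path /path/to/vae.ckpt \
#       --dump_path ./converted_vae
#
# Note that the conversion downloads the CompVis v1 inference YAML to build
# the VAE config, so the first run needs network access.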
| 371 | '''simple docstring'''
import torch
def SCREAMING_SNAKE_CASE__ ( ) -> None:
"""simple docstring"""
if torch.cuda.is_available():
a : int = torch.cuda.device_count()
else:
a : Any = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__()
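# Editor's note: a small follow-up sketch (editorial addition) showing the
# usual next step after counting GPUs -- selecting a device and placing a
# tensor on it; torch is already imported at the top of this file.
if __name__ == "__main__":
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
    x = torch.ones(2 , 2 , device=device )
    print(F"""Tensor created on {x.device}""" )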
| 345 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42
__a =42
__a =42
__a =42
__a =42
def UpperCamelCase__ ( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self : List[str] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(__a , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase__ ( self : List[Any] ):
_a , *_a = self.shape
_a = int(np.prod(__a ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(__a )
_a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ):
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(__a , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(__a , -1 , 2 )
_a = (
self.z.view(__a , 1 , 3 )
+ self.x.view(__a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=__a )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__a , *__a , 2 , 3 )
def UpperCamelCase__ ( self : Dict , __a : int , __a : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase ( lowercase : int ) -> DifferentiableProjectiveCamera:
_a = []
_a = []
_a = []
_a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_a = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a = -z * 4
_a = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] )
_a = np.cross(lowercase , lowercase )
origins.append(lowercase )
xs.append(lowercase )
ys.append(lowercase )
zs.append(lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
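# Editor's note: a self-contained sketch (editorial addition) of the orbit
# math used by the factory above: twenty camera origins on a tilted circle,
# each four units from the world origin and looking inward.
if __name__ == "__main__":
    _origins = []
    for _theta in np.linspace(0 , 2 * np.pi , num=20 ):
        _z = np.array([np.sin(_theta ), np.cos(_theta ), -0.5] )
        _z /= np.sqrt(np.sum(_z**2 ) )  # unit view direction toward the origin
        _origins.append(-_z * 4 )  # the camera sits four units back along -z
    assert np.stack(_origins , axis=0 ).shape == (20, 3)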
| 346 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
_a = 10
_a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(lowercase ) ),
} , features=lowercase , )
return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
lowerCAmelCase_ : Union[str, Any] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.txt"
_a = FILE_CONTENT
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
import bza
_a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_a = bytes(lowercase , "utf-8" )
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_a = bytes(lowercase , "utf-8" )
with gzip.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Union[str, Any]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_a = bytes(lowercase , "utf-8" )
with lza.frame.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(lowercase , "w" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> Dict:
import tarfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any ) -> Union[str, Any]:
import lzma
_a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_a = bytes(lowercase , "utf-8" )
with lzma.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int , lowercase : Any ) -> Union[str, Any]:
import zipfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_a = bytes(lowercase , "utf-8" )
with zstd.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.xml"
_a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
lowerCAmelCase_ : Optional[int] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase_ : Dict = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase_ : Dict = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = datasets.Dataset.from_dict(lowercase )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> Dict:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(lowercase ) ) as con:
_a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> int:
import bza
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(lowercase , "rb" ) as f:
_a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any , lowercase : Any ) -> List[str]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(lowercase , "wb" ) as f:
_a = pq.ParquetWriter(lowercase , schema=lowercase )
_a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA_DICT_OF_LISTS}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> List[str]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> int:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Tuple:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> List[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> str:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : int , lowercase : List[Any] ) -> Optional[int]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : str ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> str:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Any ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int , lowercase : str ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename("unsupported.ext" ) )
f.write(lowercase , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Any:
_a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[Any]:
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
_a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
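# Editor's note: hedged sketch (editorial addition) of how a test consumes the
# session fixtures above. pytest matches test parameters to fixture *function*
# names; in this dump every fixture is named _lowerCamelCase, so the parameter
# name below (csv_path) is an assumption based on the upstream datasets
# conftest this file mirrors:
#
#   def test_csv_fixture(csv_path):
#       import csv
#       with open(csv_path, newline="") as f:
#           rows = list(csv.DictReader(f))
#       assert rows[0]["col_1"] == "0"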
| 346 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _lowerCamelCase ( lowercase : Tuple , lowercase : int , lowercase : Optional[int] ) -> str:
# Construct model
if gpta_config_file == "":
_a = GPTaConfig()
else:
_a = GPTaConfig.from_json_file(lowercase )
_a = GPTaModel(lowercase )
# Load weights from numpy
load_tf_weights_in_gpta(lowercase , lowercase , lowercase )
# Save pytorch-model
_a = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_a = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , lowercase )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
lowerCAmelCase_ : int = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
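# Editor's note: hedged invocation sketch (editorial addition); the script
# filename and checkpoint path are placeholders:
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path ./gpt2-pytorch
#
# Omitting --gpt2_config_file falls back to a default GPT-2 config, as handled
# by the empty-string branch in the function above.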
| 346 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='LayoutLMv2ImageProcessor'
__a =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Dict , __a : int=None , __a : List[Any]=None , **__a : str ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Optional[int] , __a : Optional[Any] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[Any] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
_a = self.image_processor(images=__a , return_tensors=__a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__a , __a ):
_a = [text] # add batch dimension (as the image processor always adds a batch dimension)
_a = features["words"]
_a = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel values
_a = features.pop("pixel_values" )
if return_overflowing_tokens is True:
_a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] )
_a = images
return encoded_inputs
def UpperCamelCase__ ( self : int , __a : List[Any] , __a : int ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__a ) != len(__a ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(__a )} and {len(__a )}' )
return images_with_overflow
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : Union[str, Any] , *__a : Optional[int] , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : int ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCamelCase__ ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : int ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
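# Editor's note: hedged usage sketch (editorial addition) against the upstream
# class this file mirrors (transformers.LayoutXLMProcessor); shown as comments
# because from_pretrained needs hub access and an OCR backend:
#
#   from PIL import Image
#   from transformers import LayoutXLMProcessor
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(Image.open("invoice.png").convert("RGB"), return_tensors="pt")
#   # with apply_ocr=True (the default) words and boxes come from the OCR step,
#   # so passing your own `boxes` here would raise, per the check in __call__.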
| 346 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='wavlm'
def __init__( self : Union[str, Any] , __a : List[str]=32 , __a : Tuple=7_68 , __a : Union[str, Any]=12 , __a : Optional[Any]=12 , __a : str=30_72 , __a : Dict="gelu" , __a : int=0.1 , __a : List[str]=0.1 , __a : Optional[int]=0.1 , __a : Any=0.0 , __a : Optional[Any]=0.1 , __a : Optional[Any]=0.1 , __a : Any=0.02 , __a : List[Any]=1e-5 , __a : Tuple="group" , __a : Union[str, Any]="gelu" , __a : Any=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a : str=(5, 2, 2, 2, 2, 2, 2) , __a : List[Any]=(10, 3, 3, 3, 3, 2, 2) , __a : List[Any]=False , __a : List[Any]=1_28 , __a : Union[str, Any]=16 , __a : Union[str, Any]=3_20 , __a : Optional[Any]=8_00 , __a : Optional[Any]=False , __a : List[str]=True , __a : str=0.05 , __a : Tuple=10 , __a : str=2 , __a : List[str]=0.0 , __a : Dict=10 , __a : Optional[int]=3_20 , __a : Optional[Any]=2 , __a : int=0.1 , __a : int=1_00 , __a : str=2_56 , __a : List[str]=2_56 , __a : Union[str, Any]=0.1 , __a : Dict="mean" , __a : Tuple=False , __a : Optional[int]=False , __a : List[Any]=2_56 , __a : Optional[int]=(5_12, 5_12, 5_12, 5_12, 15_00) , __a : Optional[Any]=(5, 3, 3, 1, 1) , __a : int=(1, 2, 3, 1, 1) , __a : List[Any]=5_12 , __a : Dict=80 , __a : str=0 , __a : Tuple=1 , __a : Optional[int]=2 , __a : int=False , __a : List[str]=3 , __a : Any=2 , __a : Tuple=3 , __a : Dict=None , **__a : Tuple , ):
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(__a )
_a = list(__a )
_a = list(__a )
_a = conv_bias
_a = num_buckets
_a = max_bucket_distance
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = num_ctc_classes
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
_a = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(__a )
_a = list(__a )
_a = list(__a )
_a = xvector_output_dim
@property
def UpperCamelCase__ ( self : List[str] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
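# Editor's note: hedged sketch (editorial addition) against the upstream class
# this file mirrors, transformers.WavLMConfig; constructing a config needs no
# network access. The assert exercises the stride-product property defined
# above (inputs_to_logits_ratio upstream).
if __name__ == "__main__":
    from transformers import WavLMConfig

    _demo_config = WavLMConfig(num_hidden_layers=6 , hidden_size=3_84 )
    # 5 * 2**6 == 3_20 from the default conv strides declared above
    assert _demo_config.inputs_to_logits_ratio == 3_20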
| 346 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = '▁'
lowerCAmelCase_ : Optional[Any] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowerCAmelCase_ : Optional[int] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowerCAmelCase_ : List[str] = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
lowerCAmelCase_ : List[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowerCAmelCase_ : Union[str, Any] = {'mustc': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =MAX_MODEL_INPUT_SIZES
__a =['input_ids', 'attention_mask']
__a =[]
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any="<s>" , __a : List[str]="</s>" , __a : str="<pad>" , __a : List[str]="<unk>" , __a : Union[str, Any]=False , __a : Any=False , __a : List[str]=None , __a : Optional[int]=None , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = do_upper_case
_a = do_lower_case
_a = load_json(__a )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(__a , self.sp_model_kwargs )
if lang_codes is not None:
_a = lang_codes
_a = LANGUAGES[lang_codes]
_a = [f'<lang:{lang}>' for lang in self.langs]
_a = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
_a = self.lang_tokens
_a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_a = {}
@property
def UpperCamelCase__ ( self : str ):
return len(self.encoder )
@property
def UpperCamelCase__ ( self : str ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
_a = new_tgt_lang
self.set_tgt_lang_special_tokens(__a )
def UpperCamelCase__ ( self : List[Any] , __a : str ):
_a = self.lang_code_to_id[tgt_lang]
_a = [lang_code_id]
def UpperCamelCase__ ( self : Dict , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : List[str] , __a : Any ):
return self.encoder.get(__a , self.encoder[self.unk_token] )
def UpperCamelCase__ ( self : str , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : str , __a : List[str] ):
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_a = self.sp_model.decode(__a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_a = []
else:
current_sub_tokens.append(__a )
_a = self.sp_model.decode(__a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self : int , __a : Any , __a : int=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_a = [1] * len(self.prefix_tokens )
_a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self : str , __a : Dict ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
_a = Path(__a )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def _lowerCamelCase ( lowercase : str , lowercase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_a = sentencepiece.SentencePieceProcessor(**lowercase )
spm.Load(str(lowercase ) )
return spm
def _lowerCamelCase ( lowercase : str ) -> Union[Dict, List]:
with open(lowercase , "r" ) as f:
return json.load(lowercase )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> None:
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase , indent=2 )
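# Editor's note: hedged usage sketch (editorial addition) against the upstream
# class this file mirrors (transformers.Speech2TextTokenizer); the multilingual
# MuST-C checkpoint is where the language-code machinery above is active, and
# loading it needs hub access:
#
#   from transformers import Speech2TextTokenizer
#
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-medium-mustc-multilingual-st")
#   tok.tgt_lang = "fr"  # the setter above swaps in the <lang:fr> prefix token
#   ids = tok("c'est une phrase").input_ids  # begins with the <lang:fr> id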
| 346 | 1 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : List[Any] ):
_a = 0
@slow
def UpperCamelCase__ ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
_a = AutoTokenizer.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__a ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
_a = AutoTokenizer.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__a ) , 0 )
def UpperCamelCase__ ( self : List[Any] ):
_a = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def UpperCamelCase__ ( self : List[Any] ):
_a = AutoConfig.from_pretrained(__a )
self.assertIsInstance(__a , __a )
# Check that tokenizer_type ≠ model_type
_a = AutoTokenizer.from_pretrained(__a , config=__a )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def UpperCamelCase__ ( self : str ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(__a , "vocab.txt" ) )
_a = AutoTokenizer.from_pretrained(__a , tokenizer_type="bert" , use_fast=__a )
self.assertIsInstance(__a , __a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(__a , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(__a , "merges.txt" ) )
_a = AutoTokenizer.from_pretrained(__a , tokenizer_type="gpt2" , use_fast=__a )
self.assertIsInstance(__a , __a )
@require_tokenizers
def UpperCamelCase__ ( self : int ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(__a , "vocab.txt" ) )
_a = AutoTokenizer.from_pretrained(__a , tokenizer_type="bert" )
self.assertIsInstance(__a , __a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(__a , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(__a , "merges.txt" ) )
_a = AutoTokenizer.from_pretrained(__a , tokenizer_type="gpt2" )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : List[str] ):
with pytest.raises(__a ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def UpperCamelCase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_a = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
if isinstance(__a , __a ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __a )
else:
self.assertEqual(tokenizer.do_lower_case , __a )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def UpperCamelCase__ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__a , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
_a = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def UpperCamelCase__ ( self : Optional[Any] ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
_a = TOKENIZER_MAPPING.values()
_a = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__a )
@require_tokenizers
def UpperCamelCase__ ( self : str ):
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=__a ) , __a )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , __a )
@require_tokenizers
def UpperCamelCase__ ( self : List[Any] ):
_a = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=__a )
_a = "Hello, world. How are you?"
_a = tokenizer.tokenize(__a )
self.assertEqual("[UNK]" , tokens[0] )
_a = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=__a )
_a = tokenizer.tokenize(__a )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def UpperCamelCase__ ( self : Dict ):
_a = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(__a ) , __a )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
_a = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def UpperCamelCase__ ( self : Optional[int] ):
_a = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : List[Any] ):
# Check we can load the tokenizer config of an online model.
_a = get_tokenizer_config("bert-base-cased" )
_a = config.pop("_commit_hash" , __a )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__a , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_a = get_tokenizer_config(__a )
self.assertDictEqual(__a , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_a = AutoTokenizer.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
_a = get_tokenizer_config(__a )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def UpperCamelCase__ ( self : str ):
try:
AutoConfig.register("custom" , __a )
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
_a = CustomTokenizer.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
_a = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCamelCase__ ( self : Optional[int] ):
try:
AutoConfig.register("custom" , __a )
# Can register in two steps
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__a , fast_tokenizer_class=__a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__a , slow_tokenizer_class=__a , fast_tokenizer_class=__a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoTokenizer.register(__a , fast_tokenizer_class=__a )
            # We pass through a fast bert tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_a = BertTokenizerFast.from_pretrained(__a )
bert_tokenizer.save_pretrained(__a )
_a = CustomTokenizerFast.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
_a = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , __a )
_a = AutoTokenizer.from_pretrained(__a , use_fast=__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_a = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_a = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__a )
_a = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__a )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
_a = AutoTokenizer.from_pretrained(__a , trust_remote_code=__a )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
_a = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__a , use_fast=__a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
_a = AutoTokenizer.from_pretrained(__a , trust_remote_code=__a , use_fast=__a )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def UpperCamelCase__ ( self : List[str] ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =False
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =NewTokenizer
__a =False
try:
AutoConfig.register("custom" , __a )
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
AutoTokenizer.register(__a , fast_tokenizer_class=__a )
# If remote code is not set, the default is to use local
_a = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
_a = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=__a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_a = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
_a = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__a , use_fast=__a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_a = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
_a = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__a , use_fast=__a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : List[str] ):
_a = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=__a )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
_a = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=__a , use_fast=__a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def UpperCamelCase__ ( self : str ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = AutoTokenizer.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : List[str] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = AutoTokenizer.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : str ):
# Make sure we have cached the tokenizer.
_a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
_a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
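# --- Added illustration (hedged): the pattern the tests above exercise ---
# Loading a tokenizer whose class is defined in code hosted on the Hub requires
# explicitly opting in with trust_remote_code. A minimal sketch, assuming
# network access and that the test repository is still available:
def _dynamic_tokenizer_usage_sketch():
    from transformers import AutoTokenizer

    # trust_remote_code=True executes the tokenizer class shipped with the
    # checkpoint instead of a class bundled with transformers.
    tok = AutoTokenizer.from_pretrained(
        "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
    )
    return tok.__class__.__name__  # "NewTokenizerFast" when `tokenizers` is installed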
| 346 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_a = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(__a ):
_a = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
| 346 | 1 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def _lowerCamelCase ( ) -> Union[str, Any]:
_a , _a = 9, 14 # noqa: F841
_a = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_a = defaultdict(lowercase )
for nodea, nodea, cost in edges:
        adjacency[nodea].append([nodea, cost] )
        adjacency[nodea].append([nodea, cost] )
_a = mst(lowercase )
_a = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_a = tuple(answer[:2] )
_a = tuple(edge[::-1] )
assert edge in result or reverse in result
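# --- Added illustration (hedged): a self-contained Prim's algorithm ---
# The test above imports an external Prim's implementation. The sketch below
# restates the algorithm over the same adjacency shape ({node: [[dest, cost],
# ...]}); the imported function's exact return format is taken on trust.
def _prims_sketch(adjacency, start=0):
    import heapq

    visited = {start}
    heap = [(cost, start, v) for v, cost in adjacency[start]]
    heapq.heapify(heap)  # frontier of (cost, u, v) edges leaving the tree
    tree = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue  # stale edge into the tree
        visited.add(v)
        tree.append([u, v, cost])
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return tree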
| 346 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase_ : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def _lowerCamelCase ( lowercase : List[Any] ) -> Optional[int]:
_a = test_results.split(" " )
_a = 0
_a = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_a = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCamelCase ( lowercase : str ) -> Optional[Any]:
_a = {}
_a = None
_a = False
for line in failures_short_lines.split("\n" ):
if re.search(r"_ \[doctest\]" , lowercase ):
_a = True
_a = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
_a = line
_a = False
return failures
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple , __a : str , __a : Dict ):
_a = title
_a = doc_test_results["time_spent"].split("," )[0]
_a = doc_test_results["success"]
_a = doc_test_results["failures"]
_a = self.n_success + self.n_failures
# Failures and success of the modeling tests
_a = doc_test_results
@property
def UpperCamelCase__ ( self : int ):
_a = [self._time_spent]
_a = 0
for time in time_spent:
_a = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a ) == 1:
_a = [0, 0, time_parts[0]]
_a , _a , _a = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_a , _a , _a = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(__a )}h{int(__a )}m{int(__a )}s'
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : str ):
_a = 40
_a = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__a , __a )}
_a = ""
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
_a = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def UpperCamelCase__ ( ):
_a = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
def UpperCamelCase__ ( self : Tuple ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
_a = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
_a = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__a , )
def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Any] , __a : Tuple , __a : int ):
_a = ""
for key, value in failures.items():
_a = value[:2_00] + " [Truncated]" if len(__a ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
_a = job_name
_a = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
_a = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def UpperCamelCase__ ( self : str ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
_a = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
_a = sorted(self.doc_test_results.items() , key=lambda __a : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
_a = f'*Num failures* :{len(job_result["failed"] )} \n'
_a = job_result["failures"]
_a = self.get_reply_blocks(__a , __a , __a , text=__a )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=__a , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def _lowerCamelCase ( ) -> Any:
_a = os.environ["GITHUB_RUN_ID"]
_a = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
_a = requests.get(lowercase ).json()
_a = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_a = math.ceil((result["total_count"] - 100) / 100 )
for i in range(lowercase ):
_a = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase )
return {}
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = {}
if os.path.exists(lowercase ):
_a = os.listdir(lowercase )
for file in files:
try:
with open(os.path.join(lowercase , lowercase ) , encoding="utf-8" ) as f:
_a = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase , lowercase )}.' ) from e
return _artifact
def _lowerCamelCase ( ) -> str:
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Dict , __a : str ):
_a = name
_a = []
def __str__( self : List[str] ):
return self.name
def UpperCamelCase__ ( self : str , __a : str ):
self.paths.append({"name": self.name, "path": path} )
_a = {}
_a = filter(os.path.isdir , os.listdir() )
for directory in directories:
_a = directory
if artifact_name not in _available_artifacts:
_a = Artifact(lowercase )
_available_artifacts[artifact_name].add_path(lowercase )
return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = get_job_links()
lowerCAmelCase_ : Any = retrieve_available_artifacts()
lowerCAmelCase_ : List[str] = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase_ : Optional[Any] = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase_ : int = github_actions_job_links.get('run_doctests')
lowerCAmelCase_ : Union[str, Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
lowerCAmelCase_ : List[str] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = handle_test_results(artifact['stats'])
lowerCAmelCase_ : List[str] = failed
lowerCAmelCase_ : Optional[Any] = success
lowerCAmelCase_ : Tuple = time_spent[1:-1] + ', '
lowerCAmelCase_ : List[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
lowerCAmelCase_ : int = line.replace('FAILED ', '')
lowerCAmelCase_ : Optional[int] = line.split()[0].replace('\n', '')
if "::" in line:
lowerCAmelCase_ , lowerCAmelCase_ : str = line.split('::')
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase_ : Union[str, Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase_ : List[str] = all_failures[test] if test in all_failures else 'N/A'
lowerCAmelCase_ : Optional[Any] = failure
break
lowerCAmelCase_ : Tuple = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 346 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[int] ):
_a = {}
def UpperCamelCase__ ( self : Any , __a : str ):
_a = {}
def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : str , __a : float ):
if nodea not in self.connections:
self.add_node(__a )
if nodea not in self.connections:
self.add_node(__a )
_a = probability
def UpperCamelCase__ ( self : Tuple ):
return list(self.connections )
def UpperCamelCase__ ( self : Dict , __a : str ):
_a = 0
_a = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def _lowerCamelCase ( lowercase : str , lowercase : list[tuple[str, str, float]] , lowercase : int ) -> dict[str, int]:
_a = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(lowercase , lowercase , lowercase )
_a = Counter(graph.get_nodes() )
_a = start
for _ in range(lowercase ):
_a = graph.transition(lowercase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
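# --- Added illustration (hedged): the walk above with readable names ---
# The obfuscated class and function implement a Markov-chain random walk that
# counts node visits. A standalone restatement (simplified: the node stays put
# when no transition fires, whereas the class above returns ""):
def _markov_walk_sketch(start, transitions, steps):
    from collections import Counter, defaultdict
    from random import random

    probs = defaultdict(dict)
    for u, v, p in transitions:
        probs[u][v] = p
    visited = Counter({node: 1 for node in probs})  # mirrors Counter(get_nodes())
    node = start
    for _ in range(steps):
        r, acc = random(), 0.0
        for dest, p in probs[node].items():
            acc += p
            if acc > r:
                node = dest
                break
        visited[node] += 1
    return visited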
| 346 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCamelCase ( ) -> str:
_a = HfArgumentParser(lowercase )
_a = parser.parse_args_into_dataclasses()[0]
_a = TensorFlowBenchmark(args=lowercase )
try:
_a = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_a = "Arg --no_{0} is no longer used, please use --no-{0} instead."
_a = " ".join(str(lowercase ).split(" " )[:-1] )
_a = ""
_a = eval(str(lowercase ).split(" " )[-1] )
_a = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase )
if len(lowercase ) > 0:
_a = full_error_msg + begin_error_msg + str(lowercase )
raise ValueError(lowercase )
benchmark.run()
if __name__ == "__main__":
main()
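# --- Added usage sketch (hedged) ---
# The script above is driven entirely by TensorFlowBenchmarkArguments parsed
# from the command line. An in-Python invocation would look roughly like the
# following; treat the exact field names as assumptions, since the benchmark
# utilities have since been deprecated in transformers:
def _benchmark_sketch():
    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

    args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
    )
    TensorFlowBenchmark(args=args).run()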
| 346 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : str , __a : Optional[NestedDataStructureLike[PathLike]] = None , __a : Optional[NamedSplit] = None , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[int] = None , **__a : Tuple , ):
_a = path_or_paths
_a = split if split or isinstance(__a , __a ) else "train"
_a = features
_a = cache_dir
_a = keep_in_memory
_a = streaming
_a = num_proc
_a = kwargs
@abstractmethod
def UpperCamelCase__ ( self : List[str] ):
pass
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[int] = None , **__a : int , ):
_a = features
_a = cache_dir
_a = keep_in_memory
_a = streaming
_a = num_proc
_a = kwargs
@abstractmethod
def UpperCamelCase__ ( self : List[Any] ):
pass
| 346 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase_ : Union[str, Any] = None
try:
import msvcrt
except ImportError:
lowerCAmelCase_ : Tuple = None
try:
import fcntl
except ImportError:
lowerCAmelCase_ : Optional[int] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase_ : Any = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ : Tuple = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
lowerCAmelCase_ : Optional[int] = '3.0.12'
lowerCAmelCase_ : Tuple = None
def _lowerCamelCase ( ) -> Optional[int]:
global _logger
_a = _logger or logging.getLogger(__name__ )
return _logger
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Dict , __a : Optional[Any] ):
_a = lock_file
return None
def __str__( self : Any ):
_a = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[Any] , __a : Optional[int] ):
_a = lock
return None
def __enter__( self : str ):
return self.lock
def __exit__( self : List[Any] , __a : List[Any] , __a : Union[str, Any] , __a : Dict ):
self.lock.release()
return None
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : Union[str, Any] , __a : Optional[int]=-1 , __a : Tuple=None ):
_a = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
_a = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
_a = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_a = None
# The default timeout value.
_a = timeout
# We use this lock primarily for the lock counter.
_a = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_a = 0
return None
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file
@property
def UpperCamelCase__ ( self : List[Any] ):
return self._timeout
@timeout.setter
def UpperCamelCase__ ( self : int , __a : List[Any] ):
_a = float(__a )
return None
def UpperCamelCase__ ( self : Dict ):
raise NotImplementedError()
def UpperCamelCase__ ( self : str ):
raise NotImplementedError()
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file_fd is not None
def UpperCamelCase__ ( self : int , __a : int=None , __a : Tuple=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_a = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a = id(self )
_a = self._lock_file
_a = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_a = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCamelCase__ ( self : Union[str, Any] , __a : int=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_a = id(self )
_a = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
_a = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self : List[Any] ):
self.acquire()
return self
def __exit__( self : str , __a : str , __a : Dict , __a : Dict ):
self.release()
return None
def __del__( self : int ):
self.release(force=__a )
return None
def UpperCamelCase__ ( self : Tuple , __a : str , __a : int ):
_a = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
_a = os.path.dirname(__a )
_a = str(hash(__a ) )
_a = filename[: max_length - len(__a ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(__a , __a )
else:
return path
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : int , __a : str , __a : List[Any]=-1 , __a : List[Any]=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
_a = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def UpperCamelCase__ ( self : int ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self._lock_file_fd
_a = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , __a : Optional[Any] , __a : Union[str, Any]=-1 , __a : int=None ):
_a = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def UpperCamelCase__ ( self : Any ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_a = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Tuple ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_a = self._lock_file_fd
_a = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
_a = fd
return None
def UpperCamelCase__ ( self : Union[str, Any] ):
os.close(self._lock_file_fd )
_a = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase_ : str = None
if msvcrt:
lowerCAmelCase_ : List[str] = WindowsFileLock
elif fcntl:
lowerCAmelCase_ : List[str] = UnixFileLock
else:
lowerCAmelCase_ : int = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
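# --- Added usage sketch (hedged) ---
# The aliases above select a platform-appropriate lock class. Assuming the
# final alias is bound to the name `FileLock` (as in the upstream py-filelock
# package; the assignments above are obfuscated), typical usage wraps the
# critical section in a context manager:
def _filelock_sketch(path="shared_resource.txt"):
    lock = FileLock(path + ".lock", timeout=10)  # `FileLock` per upstream naming
    with lock:  # acquires on __enter__, releases (or decrements) on __exit__
        with open(path, "a") as f:
            f.write("exclusive write\n")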
| 346 | 1 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase_ : Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *__a : Dict , **__a : Any ):
super().__init__(*__a , **__a )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def UpperCamelCase__ ( self : Dict , __a : Dict=None ):
_a = {}
if top_k is not None:
_a = top_k
return {}, {}, postprocess_params
def __call__( self : Any , __a : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__a : Tuple ):
return super().__call__(__a , **__a )
def UpperCamelCase__ ( self : List[Any] , __a : Optional[Any] ):
_a = load_image(__a )
_a = self.image_processor(images=__a , return_tensors=self.framework )
return model_inputs
def UpperCamelCase__ ( self : str , __a : str ):
_a = self.model(**__a )
return model_outputs
def UpperCamelCase__ ( self : Dict , __a : Union[str, Any] , __a : Any=5 ):
if top_k > self.model.config.num_labels:
_a = self.model.config.num_labels
if self.framework == "pt":
_a = model_outputs.logits.softmax(-1 )[0]
_a , _a = probs.topk(__a )
elif self.framework == "tf":
_a = stable_softmax(model_outputs.logits , axis=-1 )[0]
_a = tf.math.top_k(__a , k=__a )
_a , _a = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_a = scores.tolist()
_a = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
| 346 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42
__a =42
__a =42
__a =42
__a =42
def UpperCamelCase__ ( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self : List[str] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(__a , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase__ ( self : List[Any] ):
_a , *_a = self.shape
_a = int(np.prod(__a ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(__a )
_a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ):
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(__a , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(__a , -1 , 2 )
_a = (
self.z.view(__a , 1 , 3 )
+ self.x.view(__a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=__a )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__a , *__a , 2 , 3 )
def UpperCamelCase__ ( self : Dict , __a : int , __a : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase ( lowercase : int ) -> DifferentiableProjectiveCamera:
_a = []
_a = []
_a = []
_a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_a = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a = -z * 4
_a = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] )
_a = np.cross(lowercase , lowercase )
origins.append(lowercase )
xs.append(lowercase )
ys.append(lowercase )
zs.append(lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
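# --- Added illustration (hedged): the per-pixel ray math restated ---
# The ray construction above maps each pixel to a [-1, 1] fraction, scales by
# tan(fov / 2), and mixes the camera basis vectors. The same computation for a
# single pixel, in plain numpy (a restatement, not part of the class API):
def _pixel_ray_sketch(origin, x, y, z, px, py, width, height, x_fov, y_fov):
    fx = ((px / (width - 1)) * 2 - 1) * np.tan(x_fov / 2)
    fy = ((py / (height - 1)) * 2 - 1) * np.tan(y_fov / 2)
    direction = z + x * fx + y * fy
    return origin, direction / np.linalg.norm(direction)  # unit-length ray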
| 346 | 1 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
lowerCAmelCase_ : Optional[int] = '\nimport os\n'
lowerCAmelCase_ : Any = '\ndef foo():\n import os\n return False\n'
lowerCAmelCase_ : int = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
lowerCAmelCase_ : Optional[int] = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
lowerCAmelCase_ : Dict = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
lowerCAmelCase_ : int = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
lowerCAmelCase_ : Union[str, Any] = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
lowerCAmelCase_ : List[Any] = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
lowerCAmelCase_ : Tuple = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
lowerCAmelCase_ : Optional[Any] = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
lowerCAmelCase_ : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , lowercase )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Optional[int] ) -> Optional[Any]:
_a = os.path.join(lowercase , "test_file.py" )
with open(lowercase , "w" ) as _tmp_file:
_tmp_file.write(lowercase )
_a = get_imports(lowercase )
assert parsed_imports == ["os"]
| 346 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ : List[str] = TypeVar('T')
lowerCAmelCase_ : Dict = TypeVar('U')
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : T | None , __a : U | None ):
_a = key
_a = val
_a = None
_a = None
def __repr__( self : Any ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Dict ):
_a = DoubleLinkedListNode(__a , __a )
_a = DoubleLinkedListNode(__a , __a )
_a , _a = self.rear, self.head
def __repr__( self : str ):
_a = ["DoubleLinkedList"]
_a = self.head
while node.next is not None:
rep.append(str(__a ) )
_a = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__a )
def UpperCamelCase__ ( self : int , __a : DoubleLinkedListNode[T, U] ):
_a = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_a = node
_a = previous
_a = node
_a = self.rear
def UpperCamelCase__ ( self : Any , __a : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
_a = node.next
_a = node.prev
_a = None
_a = None
return node
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
__a ={}
def __init__( self : Union[str, Any] , __a : int ):
_a = DoubleLinkedList()
_a = capacity
_a = 0
_a = 0
_a = 0
_a = {}
def __repr__( self : Optional[int] ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : str , __a : T ):
return key in self.cache
def UpperCamelCase__ ( self : str , __a : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_a = self.cache[key]
_a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__a )
return node.val
self.miss += 1
return None
def UpperCamelCase__ ( self : Tuple , __a : T , __a : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__a ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_a = DoubleLinkedListNode(__a , __a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_a = value
self.list.add(__a )
@classmethod
def UpperCamelCase__ ( cls : Tuple , __a : int = 1_28 ):
def cache_decorator_inner(__a : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*__a : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_a = LRUCache(__a )
_a = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_a = func(*__a )
cls.decorator_function_to_instance_map[func].put(args[0] , __a )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__a , "cache_info" , __a ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
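# --- Added comparison (hedged) ---
# For single-argument pure functions, the hand-rolled cache above is
# behaviorally close to the standard library's LRU decorator:
def _stdlib_lru_sketch():
    from functools import lru_cache

    @lru_cache(maxsize=128)
    def fib(n: int) -> int:
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(64)
    return fib.cache_info()  # hits/misses mirror the custom CacheInfo repr above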
| 346 | 1 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int , lowercase : int ) -> int:
return int(input_a == input_a == 0 )
def _lowerCamelCase ( ) -> None:
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(F'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(F'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(F'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 346 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ : Optional[int] = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ : Tuple = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _lowerCamelCase ( lowercase : str ) -> str:
re.sub("<n>" , "" , lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowercase ) )
| 346 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : str = {'vocab_file': 'vocab.json'}
lowerCAmelCase_ : List[str] = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
lowerCAmelCase_ : List[str] = {'mgp-str': 27}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __a : Tuple , __a : Dict="[GO]" , __a : List[str]="[GO]" , __a : Optional[Any]="[s]" , __a : Optional[Any]="[GO]" , **__a : Optional[Any] ):
super().__init__(
unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , **__a , )
with open(__a , encoding="utf-8" ) as vocab_handle:
_a = json.load(__a )
_a = {v: k for k, v in self.vocab.items()}
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
return len(self.vocab )
def UpperCamelCase__ ( self : int ):
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : List[Any] , __a : str ):
_a = []
for s in text:
char_tokens.extend(__a )
return char_tokens
def UpperCamelCase__ ( self : int , __a : Optional[Any] ):
return self.vocab.get(__a , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self : str , __a : int ):
return self.decoder.get(__a )
def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
if not os.path.isdir(__a ):
logger.error("Vocabulary path ({}) should be a directory".format(__a ) )
return
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__a , ensure_ascii=__a ) + "\n" )
return (vocab_file,)
| 346 |
'''simple docstring'''
import requests
lowerCAmelCase_ : List[Any] = 'YOUR API KEY'
def _lowerCamelCase ( lowercase : str , lowercase : str = giphy_api_key ) -> list:
_a = "+".join(query.split() )
_a = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
_a = requests.get(lowercase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 346 | 1 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase_ : Dict = False
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self : Optional[int] ):
_a = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_a = torch.manual_seed(0 )
_a = pipe.dual_guided(
prompt="first prompt" , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a )
_a = VersatileDiffusionPipeline.from_pretrained(__a , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_a = generator.manual_seed(0 )
_a = pipe.dual_guided(
prompt="first prompt" , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCamelCase__ ( self : int ):
_a = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_a = "cyberpunk 2077"
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_a = torch.manual_seed(0 )
_a = pipe.dual_guided(
prompt=__a , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_a = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_a = "A painting of a squirrel eating a burger "
_a = torch.manual_seed(0 )
_a = pipe.text_to_image(
prompt=__a , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_a = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_a = pipe.image_variation(__a , generator=__a , output_type="numpy" ).images
_a = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 346 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : str = '▁'
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BertGenerationTokenizer
__a =False
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<s>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[str] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__a ) , 10_02 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase__ ( self : Tuple ):
_a = BertGenerationTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_85, 46, 10, 1_70, 3_82] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase__ ( self : Any ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def UpperCamelCase__ ( self : List[str] ):
_a = "Hello World!"
_a = [1_85_36, 22_60, 1_01]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_a = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def UpperCamelCase__ ( self : Tuple ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_a = list(self.big_tokenizer.get_vocab().keys() )[:10]
_a = " ".join(__a )
_a = self.big_tokenizer.encode_plus(__a , return_tensors="pt" , return_token_type_ids=__a )
_a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__a )
_a = BertGenerationConfig()
_a = BertGenerationEncoder(__a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
# fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCAmelCase_ : Any = None
lowerCAmelCase_ : Tuple = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
lowerCAmelCase_ : List[str] = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
lowerCAmelCase_ : Union[str, Any] = '▁'
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =AlbertTokenizer
def __init__( self : Any , __a : Union[str, Any]=None , __a : Optional[Any]=None , __a : Optional[Any]=True , __a : Dict=True , __a : Optional[int]=False , __a : Dict="[CLS]" , __a : Optional[int]="[SEP]" , __a : Optional[Any]="<unk>" , __a : Any="[SEP]" , __a : Tuple="<pad>" , __a : Optional[Any]="[CLS]" , __a : Union[str, Any]="[MASK]" , **__a : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_a = (
AddedToken(__a , lstrip=__a , rstrip=__a , normalized=__a )
if isinstance(__a , __a )
else mask_token
)
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , **__a , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = False if not self.vocab_file else True
def UpperCamelCase__ ( self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None ):
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase__ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ):
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self : Any , __a : str , __a : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
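# --- Added usage sketch (hedged) ---
# Loading the fast tokenizer above via the public checkpoint names registered
# in the maps (network access assumed):
def _albert_tokenizer_sketch():
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("albert-base-v2")  # resolves to the fast class above
    enc = tok("SentencePiece marks word boundaries with a meta symbol.")
    return tok.convert_ids_to_tokens(enc["input_ids"])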
| 346 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Union[str, Any]:
_enforce_args(lowercase , lowercase )
if n == 0:
return 0
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase ) )
return max_revue
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Tuple:
_enforce_args(lowercase , lowercase )
_a = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowercase , lowercase , lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : list , lowercase : list ) -> List[str]:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase , lowercase ) , )
_a = max_revenue
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Any:
_enforce_args(lowercase , lowercase )
    # length(max_rev) = n + 1, to account for the revenue obtainable from a rod of
    # length 0.
_a = [float("-inf" ) for _ in range(n + 1 )]
_a = 0
for i in range(1 , n + 1 ):
_a = max_rev[i]
for j in range(1 , i + 1 ):
_a = max(lowercase , prices[j - 1] + max_rev[i - j] )
_a = max_revenue_i
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Dict:
if n < 0:
_a = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowercase )
if n > len(lowercase ):
_a = (
"Each integral piece of rod must have a corresponding price. "
F'Got n = {n} but length of prices = {len(lowercase )}'
)
raise ValueError(lowercase )
def _lowerCamelCase ( ) -> Any:
_a = [6, 10, 12, 15, 20, 23]
_a = len(lowercase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_a = 36
_a = top_down_cut_rod(lowercase , lowercase )
_a = bottom_up_cut_rod(lowercase , lowercase )
_a = naive_cut_rod_recursive(lowercase , lowercase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
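# --- Added worked example (hedged) ---
# The three obfuscated solvers above are the classic naive, memoized, and
# bottom-up rod-cutting implementations. A standalone restatement of the
# bottom-up recurrence with readable names, plus the textbook check:
def _bottom_up_cut_rod_sketch(n, prices):
    max_rev = [0] * (n + 1)
    for i in range(1, n + 1):
        max_rev[i] = max(prices[j - 1] + max_rev[i - j] for j in range(1, i + 1))
    return max_rev[n]

# CLRS-style check: with prices [1, 5, 8, 9] for lengths 1..4, the best cut of
# a rod of length 4 is two pieces of length 2, for revenue 5 + 5 = 10.
assert _bottom_up_cut_rod_sketch(4, [1, 5, 8, 9]) == 10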
| 346 | 1 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors
            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
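# Typical invocation (CLI wiring assumed, not shown in this file):
#   $ transformers-cli env
# instantiates EnvironmentCommand via the registered factory, prints the
# version table built in run(), and returns it as a dict for bug reports.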
| 346 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from PIL import Image
    from ..image_utils import load_image
if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
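# --- Illustrative usage sketch (default model resolution assumed) ---
# from transformers import pipeline
# vqa = pipeline("visual-question-answering")
# vqa(image="./cats.png", question="How many cats are there?", top_k=2)
# # -> [{"score": ..., "answer": ...}, ...] as assembled in postprocess() above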
| 346 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
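# Example flow (version numbers assumed for illustration): with
# __version__ == "0.17.0.dev0", a plain run proposes releasing 0.17.0, and a
# follow-up run with --post_release proposes moving the dev branch to
# 0.18.0.dev0.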
| 346 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
    import doctest
    doctest.testmod()
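# --- Minimal demo (parameter values assumed) ---
# initial = construct_highway(number_of_cells=64, frequency=4, initial_speed=2)
# history = simulate(initial, number_of_update=10, probability=0.1, max_speed=5)
# len(history)  # -> 11: the initial state plus ten update steps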
| 346 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np")
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 346 |
'''simple docstring'''
def solution(n: int = 10) -> str:
    """Returns the last n digits of NUMBER = 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    print(f"{solution(10) = }")
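# Note: pow(2, 7830457, modulus) performs the exponentiation modularly, so the
# roughly 2.4-million-digit power is never materialized; only the last n digits
# are ever kept in memory.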
| 346 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
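# --- Illustrative usage sketch (checkpoint name assumed, not from this file) ---
# pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
# out = pipe(image=original, mask_image=mask, num_inference_steps=250,
#            jump_length=10, jump_n_sample=10)
# out.images[0].save("inpainted.png")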
| 346 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    print(f"{solution() = }")
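# Sanity check (well-known factorization): 600851475143 = 71 * 839 * 1471 * 6857,
# so solution() returns 6857.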
| 346 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
    main()
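# --- Example invocation (argument values assumed) ---
# python run_onnx_exporter.py \
#     --model_name_or_path facebook/bart-base \
#     --num_beams 4 --max_length 5 \
#     --output_file_path BART.onnx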
| 346 |
| 346 | 1 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )
    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )
    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions"""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main()
| 346 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
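# Typical use of the hooks above (report id assumed):
#   $ python -m pytest tests/ --make-reports=my_test_run
# which hands the run off to pytest_terminal_summary_main to write report files.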
| 346 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
    import doctest
    doctest.testmod()
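# Worked example (input values assumed): two ideal plates of area 4 cm^2 one
# micrometre apart attract with roughly
#   casimir_force(force=0, area=4e-4, distance=1e-6)
#   # -> {'force': ~5.2e-07}  (newtons, order of magnitude)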
| 346 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
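# --- Minimal shape check (hyperparameters assumed for illustration) ---
# enc = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768,
#     dropout_rate=0.1, num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#     feed_forward_proj="gated-gelu")
# tokens = torch.zeros((1, 2048), dtype=torch.long)
# x, mask = enc(tokens, torch.ones((1, 2048), dtype=torch.long))
# x.shape  # -> torch.Size([1, 2048, 768])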
| 346 | 1 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Adds the KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
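# --- Illustrative usage (wiring assumed, in the spirit of accelerate's menus) ---
# class Menu(metaclass=KeyHandler):
#     @mark(KEYMAP["up"])
#     def move_up(self): ...
# Menu().handle_input()  # reads one key and dispatches to the marked handler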
| 346 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the chosen validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
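# --- Illustrative wiring (trainer arguments assumed, not from this file) ---
# trainer = pl.Trainer(
#     callbacks=[
#         Seq2SeqLoggingCallback(),
#         get_checkpoint_callback(output_dir, metric="rouge2"),
#         get_early_stopping_callback(metric="rouge2", patience=3),
#     ],
# )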
| 346 | 1 |
'''simple docstring'''
import numpy as np
lowerCAmelCase_ : str = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-indexed (row, column) pair of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-indexed (row, column) position."""
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
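# Usage sketch (added, not part of the original file): encode() strips spaces
# and folds "j" into "i", so a round trip returns the normalized message.
if __name__ == "__main__":
    cipher = __SCREAMING_SNAKE_CASE()
    secret = cipher.encode("test message")
    assert cipher.decode(secret) == "testmessage"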
| 346 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
lowerCAmelCase_ : str = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
lowerCAmelCase_ : Union[str, Any] = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
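# Usage sketch (added, not part of the original file): pt_to_pil() expects a
# float tensor batch scaled to [-1, 1]; torch is an assumed extra import here.
if __name__ == "__main__":
    import torch

    fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1  # stand-in for decoder output
    pil_list = pt_to_pil(fake_batch)
    print(pil_list[0].size)  # (64, 64)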
| 346 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='instructblip_vision_model'
def __init__( self : Optional[int] , __a : Optional[Any]=14_08 , __a : Dict=61_44 , __a : str=39 , __a : List[Any]=16 , __a : Optional[Any]=2_24 , __a : Optional[Any]=14 , __a : Optional[Any]="gelu" , __a : Tuple=1e-6 , __a : str=0.0 , __a : Union[str, Any]=1e-1_0 , __a : Optional[Any]=True , **__a : Union[str, Any] , ):
super().__init__(**__a )
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = patch_size
_a = image_size
_a = initializer_range
_a = attention_dropout
_a = layer_norm_eps
_a = hidden_act
_a = qkv_bias
@classmethod
def UpperCamelCase__ ( cls : Tuple , __a : Union[str, os.PathLike] , **__a : Optional[Any] ):
cls._set_token_in_kwargs(__a )
_a , _a = cls.get_config_dict(__a , **__a )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_a = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__a , **__a )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='instructblip_qformer'
def __init__( self : Optional[int] , __a : Union[str, Any]=3_05_22 , __a : str=7_68 , __a : int=12 , __a : Dict=12 , __a : Dict=30_72 , __a : Any="gelu" , __a : Any=0.1 , __a : str=0.1 , __a : Union[str, Any]=5_12 , __a : int=0.02 , __a : Dict=1e-1_2 , __a : Dict=0 , __a : Dict="absolute" , __a : Any=2 , __a : Optional[int]=14_08 , **__a : Tuple , ):
super().__init__(pad_token_id=__a , **__a )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = cross_attention_frequency
_a = encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls : Optional[int] , __a : Union[str, os.PathLike] , **__a : Union[str, Any] ):
cls._set_token_in_kwargs(__a )
_a , _a = cls.get_config_dict(__a , **__a )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_a = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__a , **__a )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='instructblip'
__a =True
def __init__( self : List[Any] , __a : Any=None , __a : Union[str, Any]=None , __a : int=None , __a : Optional[Any]=32 , **__a : Any ):
super().__init__(**__a )
if vision_config is None:
_a = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
_a = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
_a = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
_a = InstructBlipVisionConfig(**__a )
_a = InstructBlipQFormerConfig(**__a )
_a = text_config["model_type"] if "model_type" in text_config else "opt"
_a = CONFIG_MAPPING[text_model_type](**__a )
_a = self.text_config.tie_word_embeddings
_a = self.text_config.is_encoder_decoder
_a = num_query_tokens
_a = self.vision_config.hidden_size
_a = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_a = 1.0
_a = 0.02
@classmethod
def UpperCamelCase__ ( cls : Optional[Any] , __a : InstructBlipVisionConfig , __a : InstructBlipQFormerConfig , __a : PretrainedConfig , **__a : int , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__a , )
def UpperCamelCase__ ( self : Dict ):
_a = copy.deepcopy(self.__dict__ )
_a = self.vision_config.to_dict()
_a = self.qformer_config.to_dict()
_a = self.text_config.to_dict()
_a = self.__class__.model_type
return output
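# Usage sketch (added, not part of the original file): composing the umbrella
# config from its parts. Upstream these classes are InstructBlipVisionConfig /
# InstructBlipQFormerConfig / InstructBlipConfig, and the classmethod defined
# above is from_vision_qformer_text_configs.
#
#     vision = InstructBlipVisionConfig()
#     qformer = InstructBlipQFormerConfig()
#     text = CONFIG_MAPPING["opt"]()
#     config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)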
| 346 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
_a = 10
_a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(lowercase ) ),
} , features=lowercase , )
return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.txt"
_a = FILE_CONTENT
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
    import bz2
_a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_a = bytes(lowercase , "utf-8" )
    with bz2.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_a = bytes(lowercase , "utf-8" )
with gzip.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Union[str, Any]:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
_a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_a = bytes(lowercase , "utf-8" )
        with lz4.frame.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
_a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
        with py7zr.SevenZipFile(lowercase , "w" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> Dict:
import tarfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any ) -> Union[str, Any]:
import lzma
_a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_a = bytes(lowercase , "utf-8" )
with lzma.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int , lowercase : Any ) -> Union[str, Any]:
import zipfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_a = bytes(lowercase , "utf-8" )
with zstd.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.xml"
_a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = datasets.Dataset.from_dict(lowercase )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> Dict:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(lowercase ) ) as con:
_a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> int:
    import bz2
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(lowercase , "rb" ) as f:
_a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any , lowercase : Any ) -> List[str]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(lowercase , "wb" ) as f:
_a = pq.ParquetWriter(lowercase , schema=lowercase )
_a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA_DICT_OF_LISTS}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> List[str]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> int:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Tuple:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> List[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> str:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : int , lowercase : List[Any] ) -> Optional[int]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : str ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> str:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Any ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int , lowercase : str ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename("unsupported.ext" ) )
f.write(lowercase , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Any:
_a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[Any]:
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
_a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
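# Usage sketch (added, not part of the original file): pytest injects these
# fixtures by argument name. With the original fixture names restored (e.g.
# csv_path, an assumption here), a consuming test reads like:
#
#     def test_csv_has_header_and_rows(csv_path):
#         with open(csv_path) as f:
#             assert len(f.readlines()) == len(DATA) + 1  # header + data rows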
| 346 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
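# Usage sketch (added, not part of the original file): with the lazy module
# installed in sys.modules, callers import the heavy symbols as usual and only
# pay the import cost on first attribute access.
#
#     from transformers import MegaConfig, MegaModel
#     model = MegaModel(MegaConfig())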
| 346 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='LayoutLMv2ImageProcessor'
__a =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Dict , __a : int=None , __a : List[Any]=None , **__a : str ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Optional[int] , __a : Optional[Any] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[Any] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
_a = self.image_processor(images=__a , return_tensors=__a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__a , __a ):
_a = [text] # add batch dimension (as the image processor always adds a batch dimension)
_a = features["words"]
_a = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel values
_a = features.pop("pixel_values" )
if return_overflowing_tokens is True:
_a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] )
_a = images
return encoded_inputs
def UpperCamelCase__ ( self : int , __a : List[Any] , __a : int ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__a ) != len(__a ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(__a )} and {len(__a )}' )
return images_with_overflow
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : Union[str, Any] , *__a : Optional[int] , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : int ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCamelCase__ ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : int ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
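# Usage sketch (added, not part of the original file). Upstream this class is
# LayoutXLMProcessor; the checkpoint name below is an assumption.
#
#     from transformers import LayoutXLMProcessor
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(images=page_image, return_tensors="pt")  # OCR + tokenize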
| 346 | 1 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
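# Usage sketch (added, not part of the original file): the classic three-item
# instance. Items are taken greedily by value/weight ratio, so item 2 is split
# at 20/30 of its weight.
#
#     fractional_knapsack(value=[60, 100, 120], weight=[10, 20, 30], capacity=50)
#     # -> (240.0, [1, 1, 0.6666666666666666])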
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =MAX_MODEL_INPUT_SIZES
__a =['input_ids', 'attention_mask']
__a =[]
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any="<s>" , __a : List[str]="</s>" , __a : str="<pad>" , __a : List[str]="<unk>" , __a : Union[str, Any]=False , __a : Any=False , __a : List[str]=None , __a : Optional[int]=None , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = do_upper_case
_a = do_lower_case
_a = load_json(__a )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(__a , self.sp_model_kwargs )
if lang_codes is not None:
_a = lang_codes
_a = LANGUAGES[lang_codes]
_a = [f'<lang:{lang}>' for lang in self.langs]
_a = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
_a = self.lang_tokens
_a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_a = {}
@property
def UpperCamelCase__ ( self : str ):
return len(self.encoder )
@property
def UpperCamelCase__ ( self : str ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
_a = new_tgt_lang
self.set_tgt_lang_special_tokens(__a )
def UpperCamelCase__ ( self : List[Any] , __a : str ):
_a = self.lang_code_to_id[tgt_lang]
_a = [lang_code_id]
def UpperCamelCase__ ( self : Dict , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : List[str] , __a : Any ):
return self.encoder.get(__a , self.encoder[self.unk_token] )
def UpperCamelCase__ ( self : str , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : str , __a : List[str] ):
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_a = self.sp_model.decode(__a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_a = []
else:
current_sub_tokens.append(__a )
_a = self.sp_model.decode(__a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self : int , __a : Any , __a : int=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_a = [1] * len(self.prefix_tokens )
_a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self : str , __a : Dict ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
_a = Path(__a )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
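# Usage sketch (added, not part of the original file). Upstream this tokenizer
# is Speech2TextTokenizer; the checkpoint is the one in the vocab maps above.
#
#     from transformers import Speech2TextTokenizer
#     tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     ids = tok("hello world").input_ids
#     text = tok.decode(ids, skip_special_tokens=True)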
| 346 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCAmelCase_ : Optional[int] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
if args.model_type == "bert":
lowerCAmelCase_ : str = BertForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase_ : List[str] = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCAmelCase_ : Tuple = model.state_dict()
lowerCAmelCase_ : int = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCAmelCase_ : Dict = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
lowerCAmelCase_ : Dict = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
lowerCAmelCase_ : str = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCAmelCase_ : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
lowerCAmelCase_ : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
lowerCAmelCase_ : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
lowerCAmelCase_ : int = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
lowerCAmelCase_ : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
lowerCAmelCase_ : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
lowerCAmelCase_ : List[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
lowerCAmelCase_ : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
lowerCAmelCase_ : List[Any] = state_dict['cls.predictions.decoder.weight']
lowerCAmelCase_ : List[str] = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase_ : Any = state_dict[f"""cls.predictions.transform.dense.{w}"""]
lowerCAmelCase_ : Optional[Any] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 346 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_a = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(__a ):
_a = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
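# Rendering sketch (added, not part of the original file): Manim scenes render
# from the CLI; the module file name here is a placeholder.
#
#     manim -pql stage_script.py __SCREAMING_SNAKE_CASE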
| 346 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple , __a : Optional[int] , __a : int=2 , __a : Optional[Any]=32 , __a : Union[str, Any]=16 , __a : Dict=3 , __a : Optional[Any]=True , __a : Dict=True , __a : str=32 , __a : List[Any]=4 , __a : Union[str, Any]=[0, 1, 2, 3] , __a : int=4 , __a : int=37 , __a : List[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[str]=0.1 , __a : Dict=0.02 , __a : Optional[int]=3 , __a : Dict=[1, 3_84, 24, 24] , __a : Optional[Any]=True , __a : int=None , ):
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = backbone_out_indices
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = num_labels
_a = backbone_featmap_shape
_a = scope
_a = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_a = (image_size // patch_size) ** 2
_a = num_patches + 1
def UpperCamelCase__ ( self : Optional[int] ):
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self : Optional[int] ):
_a = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 1_92, 3_84, 7_68],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__a , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCamelCase__ ( self : Any , __a : Optional[Any] , __a : Optional[Any] , __a : Optional[Any] ):
_a = DPTModel(config=__a )
model.to(__a )
model.eval()
_a = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self : Union[str, Any] , __a : Union[str, Any] , __a : Tuple , __a : Dict ):
_a = self.num_labels
_a = DPTForDepthEstimation(__a )
model.to(__a )
model.eval()
_a = model(__a )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCamelCase__ ( self : List[Any] , __a : Any , __a : List[str] , __a : int ):
_a = self.num_labels
_a = DPTForSemanticSegmentation(__a )
model.to(__a )
model.eval()
_a = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__a =(
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__a =False
__a =False
__a =False
def UpperCamelCase__ ( self : Optional[int] ):
_a = DPTModelTester(self )
_a = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def UpperCamelCase__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def UpperCamelCase__ ( self : Tuple ):
pass
def UpperCamelCase__ ( self : Tuple ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def UpperCamelCase__ ( self : List[Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__a )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ ( self : List[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ ( self : Any ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__a )
def UpperCamelCase__ ( self : str ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
def UpperCamelCase__ ( self : int ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
if model_class in get_values(__a ):
continue
_a = model_class(__a )
model.to(__a )
model.train()
_a = self._prepare_for_class(__a , __a , return_labels=__a )
_a = model(**__a ).loss
loss.backward()
def UpperCamelCase__ ( self : Tuple ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = False
_a = True
if model_class in get_values(__a ) or not model_class.supports_gradient_checkpointing:
continue
_a = model_class(__a )
model.to(__a )
model.gradient_checkpointing_enable()
model.train()
_a = self._prepare_for_class(__a , __a , return_labels=__a )
_a = model(**__a ).loss
loss.backward()
def UpperCamelCase__ ( self : Union[str, Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = _config_zero_init(__a )
for model_class in self.all_model_classes:
_a = model_class(config=__a )
# Skip the check for the backbone
_a = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_a = [f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase__ ( self : Tuple ):
pass
@slow
def UpperCamelCase__ ( self : Tuple ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_a = DPTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ ( self : Tuple ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = "add"
with self.assertRaises(__a ):
_a = DPTForDepthEstimation(__a )
def _lowerCamelCase ( ) -> Any:
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
_a = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(__a )
_a = prepare_img()
_a = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_a = model(**__a )
_a = outputs.predicted_depth
# verify the predicted depth
_a = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __a )
_a = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__a )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __a , atol=1e-4 ) )
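# Usage sketch (added, not part of the original file): this suite is normally
# driven by pytest; the repo-relative path is an assumption.
#
#     python -m pytest tests/models/dpt/test_modeling_dpt.py -k "depth"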
| 346 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None  # set by post(); checked before post_reply()
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f'{int(hours)}h{int(minutes)}m{int(seconds)}s'
@property
    def header(self):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self):
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self):
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
    def post(self):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
        text = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f'*{key}*\n_{value}_\n\n'
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f'*Num failures* :{len(job_result["failed"])} \n'
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f'Results for {job}',
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}').json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'Could not open {os.path.join(name, file)}.') from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        """simple docstring"""

        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 346 | 1 |
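# A minimal, standalone sketch of the time-aggregation logic used by the `time`
# property above: report durations such as "1:02:31" or ".42" are normalized to
# (hours, minutes, seconds) triples before being summed. The helper name below
# is ours for illustration, not part of the original module.
def _demo_parse_time_spent(raw: str) -> float:
    parts = raw.split(":")
    if len(parts) == 1:  # e.g. ".42" or "3.14" -> less than a minute
        parts = [0, 0, parts[0]]
    hours, minutes, seconds = int(parts[0]), int(parts[1]), float(parts[2])
    return hours * 3600 + minutes * 60 + seconds

assert _demo_parse_time_spent("1:02:31") == 3751.0
assert _demo_parse_time_spent("0.42") == 0.42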
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    # Validate the input, strip an optional sign, and delegate to the recursion
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
| 346 |
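# Example usage of the recursive decimal-to-binary conversion above; results
# match Python's built-in bin() for non-negative inputs:
# >>> main("25")
# '0b11001'
# >>> main("-25")
# '-0b11001'
# >>> bin(25)
# '0b11001'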
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
benchmark.run()
if __name__ == "__main__":
main()
| 346 | 1 |
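# The benchmark can also be driven programmatically instead of via the CLI
# parser above. A minimal sketch (the model name and sizes are illustrative,
# not prescribed by the script):
#
# from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#
# args = TensorFlowBenchmarkArguments(
#     models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
# )
# results = TensorFlowBenchmark(args=args).run()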
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
"""simple docstring"""
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
                 vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 346 |
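# A small sketch of the position-id construction used in the cache tests above:
# for the prefix pass, positions 0..L-2 are broadcast across the batch, and for
# the final single-token pass only position L-1 is fed. Shapes are illustrative.
import jax.numpy as jnp

batch_size, seq_len = 2, 5
prefix_position_ids = jnp.broadcast_to(
    jnp.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1)
)  # shape (2, 4): positions for decoder_input_ids[:, :-1]
last_position_ids = jnp.array(batch_size * [[seq_len - 1]], dtype="i4")  # shape (2, 1)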
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """simple docstring"""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f'The file lock \'{self.lock_file}\' could not be acquired.'
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
                        self._acquire()

                if self.is_locked:
                    logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...')
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}')

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """simple docstring"""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn('only soft file lock is available')
| 346 | 1 |
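# Typical usage of the lock classes above: `FileLock` resolves to the strongest
# mechanism available on the current platform, and the context manager handles
# acquire/release. The file path below is illustrative.
#
# lock = FileLock("high_ground.txt.lock")
# with lock:
#     with open("high_ground.txt", "a") as f:
#         f.write("You were the chosen one.")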
'''simple docstring'''
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    # strip punctuation and newlines, then count exact (case-insensitive) token matches
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
| 346 |
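# A worked example of the functions above with a toy two-document corpus. The
# numbers follow directly from the definitions: tf counts exact token matches,
# df counts documents containing the term, and idf = round(log10(n / df), 3).
corpus = "this is the first document\nthis document is the second document"

tf = term_frequency("document", "this document is the second document")  # 2
df, n = document_frequency("document", corpus)  # (2, 2)
idf = inverse_document_frequency(df, n)  # round(log10(2 / 2), 3) == 0.0
score = tf_idf(tf, idf)  # 0.0 -- "document" appears in every document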
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """simple docstring"""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height,
            x_fov=self.x_fov, y_fov=self.y_fov,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 346 | 1 |
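# A short usage sketch of the camera above: `create_pan_cameras` builds 20 poses
# on a circle around the origin, and `camera_rays` yields one (origin, direction)
# pair per pixel per pose.
cameras = create_pan_cameras(64)
rays = cameras.camera_rays
# 20 views x 64 x 64 pixels, each with a 3D origin and a unit 3D direction
assert rays.shape == (1, 20 * 64 * 64, 2, 3)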
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[Any] ):
_a = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def UpperCamelCase__ ( self : List[Any] ):
with self.assertRaises(__a ):
_a = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def UpperCamelCase__ ( self : int ):
with self.assertRaises(__a ):
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def UpperCamelCase__ ( self : int ):
_a = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def UpperCamelCase__ ( self : List[str] ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def UpperCamelCase__ ( self : str ):
_a = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def UpperCamelCase__ ( self : List[Any] ):
_a = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def UpperCamelCase__ ( self : List[str] ):
_a = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def UpperCamelCase__ ( self : int ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_a = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def UpperCamelCase__ ( self : List[Any] ):
_a = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def UpperCamelCase__ ( self : str ):
_a = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def UpperCamelCase__ ( self : str ):
import PIL.Image
_a = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=__a ) as mock_cast_to_python_objects:
_a = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_a , _a = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , __a )
self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase ( lowercase : List[Any] , lowercase : Tuple ) -> Any:
_a = pa.BufferOutputStream()
_a = pa.schema(lowercase ) if fields else None
with ArrowWriter(stream=lowercase , schema=lowercase , writer_batch_size=lowercase ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = pa.BufferOutputStream()
_a = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=lowercase , features=lowercase ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_a = pa.BufferReader(output.getvalue() )
_a = pa.ipc.open_stream(lowercase )
_a = f.read_all()
_a = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowercase )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Tuple:
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase , writer_batch_size=lowercase , hash_salt="split_name" , check_duplicates=lowercase , ) as writer:
with pytest.raises(lowercase ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_a , _a = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def _lowerCamelCase ( lowercase : Optional[int] ) -> Optional[Any]:
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase , writer_batch_size=lowercase , hash_salt="split_name" , check_duplicates=lowercase , ) as writer:
with pytest.raises(lowercase ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_a , _a = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def _lowerCamelCase ( lowercase : str ) -> List[str]:
_a = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase , writer_batch_size=lowercase , hash_salt="split_name" , check_duplicates=lowercase , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase ( lowercase : int , lowercase : List[Any] ) -> Dict:
_a = pa.BufferOutputStream()
_a = pa.schema(lowercase ) if fields else None
with ArrowWriter(stream=lowercase , schema=lowercase , writer_batch_size=lowercase ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase ( lowercase : List[Any] , lowercase : Union[str, Any] ) -> List[str]:
_a = pa.BufferOutputStream()
_a = pa.schema(lowercase ) if fields else None
with ArrowWriter(stream=lowercase , schema=lowercase , writer_batch_size=lowercase ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase ( lowercase : List[str] , lowercase : str ) -> Any:
_a = pa.BufferOutputStream()
_a = pa.schema(lowercase ) if fields else None
with ArrowWriter(stream=lowercase , schema=lowercase , writer_batch_size=lowercase ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowerCamelCase ( ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
_a = {"col_1": pa.string(), "col_2": pa.intaa()}
_a = os.path.join(lowercase , "test.arrow" )
with ArrowWriter(path=lowercase , schema=pa.schema(lowercase ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowercase , metadata=writer._schema.metadata )
_check_output(lowercase , 1 )
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowerCamelCase ( lowercase : Dict , lowercase : List[Any] , lowercase : int ) -> List[Any]:
_a = pa.array(TypedSequence(lowercase , optimized_int_type=lowercase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Tuple ) -> Optional[Any]:
# in range
_a = pa.array(OptimizedTypedSequence(lowercase , col=lowercase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_a = copy.deepcopy(lowercase )
_a = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowercase , lowercase )
_a = pa.array(OptimizedTypedSequence(lowercase , col=lowercase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any ) -> int:
_a = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=lowercase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Tuple:
_a = "mock://dataset-train.arrow"
with ArrowWriter(path=lowercase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(lowercase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowercase )
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = pa.BufferOutputStream()
with ParquetWriter(stream=lowercase ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_a , _a = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(lowercase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def _lowerCamelCase ( lowercase : Dict , lowercase : int ) -> int:
import PIL.Image
_a = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(lowercase , format="png" )
_a = pa.BufferOutputStream()
with ParquetWriter(
stream=lowercase , features=Features({"image": Image()} ) , embed_local_files=lowercase ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_a = pa.BufferReader(output.getvalue() )
_a = pq.read_table(lowercase )
_a = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , lowercase )
with open(lowercase , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _lowerCamelCase ( ) -> List[str]:
_a = pa.schema([pa.field("col_1" , pa.string() , nullable=lowercase )] )
_a = pa.BufferOutputStream()
with ArrowWriter(stream=lowercase ) as writer:
writer._build_writer(inferred_schema=lowercase )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 346 |
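# A minimal sketch of the ArrowWriter API exercised by the tests above: write a
# couple of examples to an in-memory buffer and read them back as a table.
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}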
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ : List[str] = TypeVar('T')
lowerCAmelCase_ : Dict = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U]):
    """simple docstring"""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self):
        return (
            f'Node: key: {self.key}, val: {self.val}, '
            f'has next: {bool(self.next)}, has prev: {bool(self.prev)}'
        )
class DoubleLinkedList(Generic[T, U]):
    """simple docstring"""

    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """simple docstring"""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self):
        return (
            f'CacheInfo(hits={self.hits}, misses={self.miss}, '
            f'capacity={self.capacity}, current size={self.num_keys})'
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1

            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 1 |
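# Example usage of the decorator interface above: memoize a single-argument
# function and inspect hit/miss counts via the attached cache_info() hook.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, computed with memoized recursion
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)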
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_00) = }""")
| 346 |
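# The first few ugly numbers produced by the three-way merge above are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... (7 and 11 are skipped because they have
# prime factors other than 2, 3 and 5). For example:
# >>> [ugly_numbers(i) for i in range(1, 11)]
# [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]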
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ : Optional[int] = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ : Tuple = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 346 | 1 |
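# Example usage of the splitter above (requires nltk and its "punkt" data, which
# the module downloads on import):
#
# >>> add_newline_to_end_of_each_sentence("Hello there. General Kenobi!")
# 'Hello there.\nGeneral Kenobi!'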
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    """simple docstring"""

    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_a = feat_extract.pad(__a , padding=__a )
_a = input_a[input_name]
_a = feat_extract.pad(__a , padding="longest" )
_a = input_a[input_name]
_a = feat_extract.pad(__a , padding="max_length" , max_length=len(speech_inputs[-1] ) )
_a = input_a[input_name]
_a = feat_extract.pad(__a , padding="longest" , return_tensors="np" )
_a = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__a ):
feat_extract.pad(__a , padding="max_length" )[input_name]
_a = feat_extract.pad(
__a , padding="max_length" , max_length=__a , return_tensors="np" )
_a = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_are_equal(__a , __a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_a = feat_extract.pad(__a , pad_to_multiple_of=10 )
_a = input_a[input_name]
_a = feat_extract.pad(__a , padding="longest" , pad_to_multiple_of=10 )
_a = input_a[input_name]
_a = feat_extract.pad(
__a , padding="max_length" , pad_to_multiple_of=10 , max_length=__a )
_a = input_a[input_name]
_a = feat_extract.pad(
__a , padding="max_length" , pad_to_multiple_of=10 , max_length=__a , return_tensors="np" , )
_a = input_a[input_name]
self.assertTrue(all(len(__a ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__a , __a ) )
_a = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(__a ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_a = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})
# truncate to smallest
_a = feat_extract.pad(
__a , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=__a )
_a = input_a[input_name]
_a = feat_extract.pad(__a , padding="max_length" , max_length=len(speech_inputs[0] ) )
_a = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertFalse(_inputs_have_equal_length(__a ) )
# truncate to smallest with np
_a = feat_extract.pad(
__a , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=__a , )
_a = input_a[input_name]
_a = feat_extract.pad(
__a , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
_a = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__a ) )
# truncate to middle
_a = feat_extract.pad(
__a , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=__a , return_tensors="np" , )
_a = input_a[input_name]
_a = feat_extract.pad(
__a , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=__a )
_a = input_a[input_name]
_a = feat_extract.pad(
__a , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
_a = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertTrue(_inputs_are_equal(__a , __a ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__a ):
feat_extract.pad(__a , truncation=__a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__a ):
feat_extract.pad(__a , padding="longest" , truncation=__a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__a ):
feat_extract.pad(__a , padding="longest" , truncation=__a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__a ):
feat_extract.pad(__a , padding="max_length" , truncation=__a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_a = 12
_a = feat_extract.pad(
__a , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__a , truncation=__a , )
_a = input_a[input_name]
_a = feat_extract.pad(
__a , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__a , )
_a = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_a = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_a = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
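# Illustrative arithmetic (numbers are assumed, not taken from the test data): with
# len(speech_inputs[0]) == 800 and pad_to_multiple_of == 12, the remainder is nonzero,
# so expected_length is rounded up to (800 // 12 + 1) * 12 == 804.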
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(__a ) )
self.assertFalse(_inputs_have_equal_length(__a ) )
def UpperCamelCase__ ( self : str ):
self._check_padding(numpify=__a )
def UpperCamelCase__ ( self : Tuple ):
self._check_padding(numpify=__a )
def UpperCamelCase__ ( self : List[str] ):
self._check_truncation(numpify=__a )
def UpperCamelCase__ ( self : Dict ):
self._check_truncation(numpify=__a )
@require_torch
def UpperCamelCase__ ( self : Tuple ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
_a = self.feat_extract_tester.prepare_inputs_for_common()
_a = feat_extract.model_input_names[0]
_a = BatchFeature({input_name: speech_inputs} )
_a = feat_extract.pad(__a , padding="longest" , return_tensors="np" )[input_name]
_a = feat_extract.pad(__a , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
_a = self.feat_extract_tester.prepare_inputs_for_common()
_a = feat_extract.model_input_names[0]
_a = BatchFeature({input_name: speech_inputs} )
_a = feat_extract.pad(__a , padding="longest" , return_tensors="np" )[input_name]
_a = feat_extract.pad(__a , padding="longest" , return_tensors="tf" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.feat_extract_dict
_a = True
_a = self.feature_extraction_class(**__a )
_a = self.feat_extract_tester.prepare_inputs_for_common()
_a = [len(__a ) for x in speech_inputs]
_a = feat_extract.model_input_names[0]
_a = BatchFeature({input_name: speech_inputs} )
_a = feat_extract.pad(__a , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , __a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __a )
def UpperCamelCase__ ( self : int ):
_a = self.feat_extract_dict
_a = True
_a = self.feature_extraction_class(**__a )
_a = self.feat_extract_tester.prepare_inputs_for_common()
_a = [len(__a ) for x in speech_inputs]
_a = feat_extract.model_input_names[0]
_a = BatchFeature({input_name: speech_inputs} )
_a = min(__a )
_a = feat_extract.pad(
__a , padding="max_length" , max_length=__a , truncation=__a , return_tensors="np" )
self.assertIn("attention_mask" , __a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 346 |
'''simple docstring'''
import requests
lowerCAmelCase_ : List[Any] = 'YOUR API KEY'
def _lowerCamelCase ( lowercase : str , lowercase : str = giphy_api_key ) -> list:
_a = "+".join(query.split() )
_a = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
_a = requests.get(lowercase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 346 | 1 |
'''simple docstring'''
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCAmelCase_ : Dict = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowerCAmelCase_ : Tuple = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
lowerCAmelCase_ : Dict = 'zero2'
lowerCAmelCase_ : Optional[Any] = 'zero3'
lowerCAmelCase_ : Optional[Any] = [ZEROa, ZEROa]
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : List[Any] ) -> Tuple:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_a = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
return F'{func.__name__}_{param_based_name}'
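# Illustrative example (assumed names, not part of the original test): for a test
# method named "test_fp32_non_distributed" parameterized with
# param.args == ("zero2", "base"), the generated sub-test name would be
# "test_fp32_non_distributed_zero2_base".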
# Cartesian-product of zero stages with models to test
lowerCAmelCase_ : List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@parameterized.expand(__a , name_func=__a )
def UpperCamelCase__ ( self : Dict , __a : Union[str, Any] , __a : Any ):
self.run_and_check(
stage=__a , model=__a , distributed=__a , fpaa=__a , )
@require_torch_multi_gpu
@parameterized.expand(__a , name_func=__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] ):
self.run_and_check(
stage=__a , model=__a , distributed=__a , fpaa=__a , )
@parameterized.expand(__a , name_func=__a )
def UpperCamelCase__ ( self : List[str] , __a : List[Any] , __a : Tuple ):
self.run_and_check(
stage=__a , model=__a , distributed=__a , fpaa=__a , )
@require_torch_multi_gpu
@parameterized.expand(__a , name_func=__a )
def UpperCamelCase__ ( self : Optional[int] , __a : int , __a : str ):
self.run_and_check(
stage=__a , model=__a , distributed=__a , fpaa=__a , )
def UpperCamelCase__ ( self : str , __a : Union[str, Any] ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def UpperCamelCase__ ( self : Optional[Any] , __a : str , __a : str , __a : int = 10 , __a : bool = True , __a : bool = True , __a : bool = True , ):
_a = models[model]
_a = self.run_trainer(
stage=__a , model_name=__a , eval_steps=__a , num_train_epochs=1 , distributed=__a , fpaa=__a , )
self.do_checks(__a )
return output_dir
def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : str , __a : int = 10 , __a : int = 1 , __a : bool = True , __a : bool = True , ):
_a = self.get_auto_remove_tmp_dir("./xxx" , after=__a )
_a = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__a )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_a = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
_a = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
_a = self.get_launcher(__a )
_a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__a , env=self.get_env() )
return output_dir
def UpperCamelCase__ ( self : int , __a : Union[str, Any]=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
_a = min(2 , get_gpu_count() ) if distributed else 1
return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
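# Illustrative sketch (assumed scenario): on a 4-GPU machine with distributed=True,
# num_gpus is capped at 2 above, so the launcher becomes
# ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"].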
| 346 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : str = '▁'
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BertGenerationTokenizer
__a =False
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<s>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[str] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__a ) , 10_02 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase__ ( self : Tuple ):
_a = BertGenerationTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_85, 46, 10, 1_70, 3_82] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase__ ( self : Any ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def UpperCamelCase__ ( self : List[str] ):
_a = "Hello World!"
_a = [1_85_36, 22_60, 1_01]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_a = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def UpperCamelCase__ ( self : Tuple ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_a = list(self.big_tokenizer.get_vocab().keys() )[:10]
_a = " ".join(__a )
_a = self.big_tokenizer.encode_plus(__a , return_tensors="pt" , return_token_type_ids=__a )
_a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__a )
_a = BertGenerationConfig()
_a = BertGenerationEncoder(__a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
# fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346 | 1 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def _lowerCamelCase ( lowercase : np.ndarray , lowercase : float ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
_a = math.sqrt(lowercase )
_a = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
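# The expression above evaluates the Gaussian element-wise:
#   g(x) = 1 / (sigma * sqrt(2 * pi)) * exp(-x**2 / (2 * sigma**2))
# with sigma = sqrt(variance) (the second argument) and x taken from `img`.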
def _lowerCamelCase ( lowercase : np.ndarray , lowercase : int , lowercase : int , lowercase : int ) -> np.ndarray:
_a = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def _lowerCamelCase ( lowercase : int , lowercase : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
_a = np.zeros((kernel_size, kernel_size) )
for i in range(0 , lowercase ):
for j in range(0 , lowercase ):
_a = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(lowercase , lowercase )
def _lowerCamelCase ( lowercase : np.ndarray , lowercase : float , lowercase : float , lowercase : int , ) -> np.ndarray:
_a = np.zeros(img.shape )
_a = get_gauss_kernel(lowercase , lowercase )
_a , _a = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
_a = get_slice(lowercase , lowercase , lowercase , lowercase )
_a = img_s - img_s[kernel_size // 2, kernel_size // 2]
_a = vec_gaussian(lowercase , lowercase )
_a = np.multiply(lowercase , lowercase )
_a = np.multiply(lowercase , lowercase )
_a = np.sum(lowercase ) / np.sum(lowercase )
_a = val
return imga
def _lowerCamelCase ( lowercase : list ) -> tuple:
_a = args[1] if args[1:] else "../image_data/lena.jpg"
_a = float(args[2] ) if args[2:] else 1.0
_a = float(args[3] ) if args[3:] else 1.0
if args[4:]:
_a = int(args[4] )
_a = kernel_size + abs(kernel_size % 2 - 1 )
else:
_a = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = parse_args(sys.argv)
lowerCAmelCase_ : Optional[int] = cva.imread(filename, 0)
cva.imshow('input image', img)
lowerCAmelCase_ : str = img / 2_55
lowerCAmelCase_ : Tuple = out.astype('float32')
lowerCAmelCase_ : Optional[int] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowerCAmelCase_ : str = out * 2_55
lowerCAmelCase_ : int = np.uinta(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 346 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Union[str, Any]:
_enforce_args(lowercase , lowercase )
if n == 0:
return 0
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase ) )
return max_revue
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Tuple:
_enforce_args(lowercase , lowercase )
_a = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowercase , lowercase , lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : list , lowercase : list ) -> List[str]:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase , lowercase ) , )
_a = max_revenue
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Any:
_enforce_args(lowercase , lowercase )
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
_a = [float("-inf" ) for _ in range(n + 1 )]
_a = 0
for i in range(1 , n + 1 ):
_a = max_rev[i]
for j in range(1 , i + 1 ):
_a = max(lowercase , prices[j - 1] + max_rev[i - j] )
_a = max_revenue_i
return max_rev[n]
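# Worked example (illustrative prices, not the ones used in main() below): with
# prices = [1, 5, 8, 9] and n = 4, max_rev builds up to [0, 1, 5, 8, 10], so
# bottom_up_cut_rod(4, prices) == 10 (two pieces of length 2: 5 + 5).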
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Dict:
if n < 0:
_a = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowercase )
if n > len(lowercase ):
_a = (
"Each integral piece of rod must have a corresponding price. "
F'Got n = {n} but length of prices = {len(lowercase )}'
)
raise ValueError(lowercase )
def _lowerCamelCase ( ) -> Any:
_a = [6, 10, 12, 15, 20, 23]
_a = len(lowercase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1, resulting in a revenue of 6 * 6 = 36.
_a = 36
_a = top_down_cut_rod(lowercase , lowercase )
_a = bottom_up_cut_rod(lowercase , lowercase )
_a = naive_cut_rod_recursive(lowercase , lowercase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 346 | 1 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 6008_5147_5143 ) -> int:
try:
_a = int(lowercase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
_a = 2
_a = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_a = i
while n % i == 0:
_a = n // i
i += 1
return int(lowercase )
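# Worked example (illustrative): n = 13195 has prime factors 5, 7, 13 and 29, so
# the function returns 29; with the default argument it returns the largest prime
# factor of 600851475143 (Project Euler problem 3).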
if __name__ == "__main__":
print(f"""{solution() = }""")
| 346 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , *__a : Optional[int] , **__a : List[str] ):
super().__init__(*__a , **__a )
self.check_model_type(__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Dict=None , __a : int=None , __a : Optional[Any]=None , **__a : List[Any] ):
_a , _a = {}, {}
if padding is not None:
_a = padding
if truncation is not None:
_a = truncation
if top_k is not None:
_a = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , __a : Union["Image.Image", str] , __a : str = None , **__a : Any ):
if isinstance(__a , (Image.Image, str) ) and isinstance(__a , str ):
_a = {"image": image, "question": question}
else:
_a = image
_a = super().__call__(__a , **__a )
return results
def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : Optional[Any]=False , __a : List[Any]=False ):
_a = load_image(inputs["image"] )
_a = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=__a , truncation=__a )
_a = self.image_processor(images=__a , return_tensors=self.framework )
model_inputs.update(__a )
return model_inputs
def UpperCamelCase__ ( self : List[Any] , __a : List[str] ):
_a = self.model(**__a )
return model_outputs
def UpperCamelCase__ ( self : int , __a : Optional[int] , __a : Dict=5 ):
if top_k > self.model.config.num_labels:
_a = self.model.config.num_labels
if self.framework == "pt":
_a = model_outputs.logits.sigmoid()[0]
_a , _a = probs.topk(__a )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_a = scores.tolist()
_a = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
| 346 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase_ : Dict = logging.getLogger(__name__)
def _lowerCamelCase ( ) -> int:
_a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowercase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowercase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowercase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowercase , default=1000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowercase , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowercase , type=lowercase , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowercase , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowercase , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
_a = parser.parse_args()
return args
def _lowerCamelCase ( lowercase : str ) -> Dict:
def fn(lowercase : Dict ):
return tokenizer(examples["text"] )
return fn
def _lowerCamelCase ( lowercase : Optional[Any] ) -> List[Any]:
_a = []
for i in range(len(tokenized_data["input_ids"] ) ):
_a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
_a = tf.train.Features(feature=lowercase )
_a = tf.train.Example(features=lowercase )
_a = example.SerializeToString()
records.append(lowercase )
return records
def _lowerCamelCase ( lowercase : List[Any] ) -> Union[str, Any]:
_a = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
_a = min(len(lowercase ) , args.limit )
_a = dataset.select(range(lowercase ) )
print(F'Limiting the dataset to {args.limit} entries.' )
_a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_a = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
else:
_a = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
_a = tokenize_function(lowercase )
_a = dataset.map(lowercase , batched=lowercase , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowercase : Any ):
# Concatenate all texts.
_a = {k: sum(examples[k] , [] ) for k in examples.keys()}
_a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_a = {
k: [t[i : i + args.max_length] for i in range(0 , lowercase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
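# Illustrative arithmetic (assumed sizes): with args.max_length == 512 and 1300
# concatenated tokens, total_length is truncated to 1024, yielding two 512-token
# chunks; the trailing 276 tokens are dropped.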
_a = dataset_tokenized.map(lowercase , batched=lowercase , batch_size=1000 , num_proc=4 )
_a = 0
_a = 0
for shard in range(0 , len(lowercase ) , args.shard_size ):
_a = grouped_dataset[shard : shard + args.shard_size]
_a = len(dataset_snapshot["input_ids"] )
_a = os.path.join(lowercase , F'dataset-{shard_count}-{records_containing}.tfrecord' )
_a = get_serialized_examples(lowercase )
with tf.io.TFRecordWriter(lowercase ) as out_file:
for i in range(len(lowercase ) ):
_a = serialized_examples[i]
out_file.write(lowercase )
print("Wrote file {} containing {} records".format(lowercase , lowercase ) )
shard_count += 1
total_records += records_containing
with open(F'split-{args.split}-records-count.txt' , "w" ) as f:
print(F'Total {args.split} records: {total_records}' , file=lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : Optional[int] = parse_args()
main(args)
| 346 |
'''simple docstring'''
from random import randint, random
def _lowerCamelCase ( lowercase : int , lowercase : int , lowercase : int , lowercase : bool = False , lowercase : bool = False , lowercase : int = 5 , ) -> list:
_a = [[-1] * number_of_cells] # Create a highway without any car
_a = 0
_a = max(lowercase , 0 )
while i < number_of_cells:
_a = (
randint(0 , lowercase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _lowerCamelCase ( lowercase : list , lowercase : int ) -> int:
_a = 0
_a = highway_now[car_index + 1 :]
for cell in range(len(lowercase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here the car is near the end of the highway, so wrap around to the start
return distance + get_distance(lowercase , -1 )
def _lowerCamelCase ( lowercase : list , lowercase : float , lowercase : int ) -> list:
_a = len(lowercase )
# Before the calculations, the highway is empty
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_a = min(highway_now[car_index] + 1 , lowercase )
# Number of empty cells before the next car
_a = get_distance(lowercase , lowercase ) - 1
# We can't have the car causing an accident
_a = min(next_highway[car_index] , lowercase )
if random() < probability:
# Randomly, a driver will slow down
_a = max(next_highway[car_index] - 1 , 0 )
return next_highway
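# The three steps above match the per-car rules of the classic Nagel-Schreckenberg
# cellular-automaton traffic model: accelerate by one cell per step up to max_speed,
# brake to the free gap ahead, then randomly slow down with the given probability;
# the movement step is applied in the simulation function below.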
def _lowerCamelCase ( lowercase : list , lowercase : int , lowercase : float , lowercase : int ) -> list:
_a = len(highway[0] )
for i in range(lowercase ):
_a = update(highway[i] , lowercase , lowercase )
_a = [-1] * number_of_cells
for car_index in range(lowercase ):
_a = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_a = (car_index + speed) % number_of_cells
# Commit the change of position
_a = speed
highway.append(lowercase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = '▁'
lowerCAmelCase_ : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ : Dict = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
lowerCAmelCase_ : Dict = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase_ : Optional[int] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =['input_ids', 'attention_mask']
__a =[]
__a =[]
def __init__( self : Optional[int] , __a : Optional[Any] , __a : Dict="<s>" , __a : int="</s>" , __a : Tuple="</s>" , __a : Union[str, Any]="<s>" , __a : int="<unk>" , __a : Dict="<pad>" , __a : int="<mask>" , __a : Union[str, Any]=None , __a : List[str]=None , __a : Union[str, Any]=None , __a : Optional[Dict[str, Any]] = None , __a : Optional[int]=None , **__a : Optional[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , tokenizer_file=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
_a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_a = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_a = 1
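# Example (reading the alignment table above): spm id 3 (",") maps to
# fairseq id 3 + fairseq_offset == 4.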
_a = len(self.sp_model )
_a = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a )
}
_a = {v: k for k, v in self.lang_code_to_id.items()}
_a = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_a = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_a = src_lang if src_lang is not None else "en_XX"
_a = self.lang_code_to_id[self._src_lang]
_a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : int ):
_a = self.__dict__.copy()
_a = None
_a = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , __a : List[Any] ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCamelCase__ ( self : List[Any] ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase__ ( self : Optional[int] ):
return self._src_lang
@src_lang.setter
def UpperCamelCase__ ( self : str , __a : str ):
_a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase__ ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_a = [1] * len(self.prefix_tokens )
_a = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def UpperCamelCase__ ( self : Tuple , __a : List[int] , __a : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase__ ( self : int , __a : List[int] , __a : Optional[List[int]] = None ):
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self : List[Any] , __a : str , __a : str , __a : Optional[str] , __a : Optional[str] , **__a : Union[str, Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_a = src_lang
_a = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
_a = self.convert_tokens_to_ids(__a )
_a = tgt_lang_id
return inputs
def UpperCamelCase__ ( self : str ):
_a = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self : Union[str, Any] , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Any ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a = self.sp_model.PieceToId(__a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self : Any , __a : List[str] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ ( self : Optional[int] , __a : List[str] ):
_a = "".join(__a ).replace(__a , " " ).strip()
return out_string
def UpperCamelCase__ ( self : str , __a : str , __a : Optional[str] = None ):
if not os.path.isdir(__a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def UpperCamelCase__ ( self : Optional[int] , __a : List[str] , __a : str = "en_XX" , __a : Optional[List[str]] = None , __a : str = "ro_RO" , **__a : Tuple , ):
_a = src_lang
_a = tgt_lang
return super().prepare_seqaseq_batch(__a , __a , **__a )
def UpperCamelCase__ ( self : str ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase__ ( self : Any ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase__ ( self : int , __a : Union[str, Any] ):
_a = self.lang_code_to_id[src_lang]
_a = []
_a = [self.eos_token_id, self.cur_lang_code]
def UpperCamelCase__ ( self : Optional[Any] , __a : str ):
_a = self.lang_code_to_id[lang]
_a = []
_a = [self.eos_token_id, self.cur_lang_code]
| 346 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 10 ) -> str:
if not isinstance(lowercase , lowercase ) or n < 0:
raise ValueError("Invalid input" )
_a = 10**n
_a = 2_8433 * (pow(2 , 783_0457 , lowercase )) + 1
return str(number % modulus )
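# The three-argument pow performs modular exponentiation, so only the last n digits
# are ever held in memory; the full 2 ** 7830457 (a number with over two million
# digits) is never materialised. As a small check, solution(1) == "7", since
# 2 ** 7830457 ends in 2 and 28433 * 2 + 1 == 56867 ends in 7.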
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 346 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase_ : List[Any] = '\\n\n'
lowerCAmelCase_ : Optional[Any] = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase_ : Optional[int] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE (datasets.Metric ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def UpperCamelCase__ ( self : List[str] , __a : Optional[int] , __a : Optional[int] , __a : int = 16 , __a : bool = True , __a : Dict=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
_a = "cuda"
else:
_a = "cuda" if torch.cuda.is_available() else "cpu"
_a = AutoModelForCausalLM.from_pretrained(__a )
_a = model.to(__a )
_a = AutoTokenizer.from_pretrained(__a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_a = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_a = model.config.max_length - 1
else:
_a = model.config.max_length
_a = tokenizer(
__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , return_tensors="pt" , return_attention_mask=__a , ).to(__a )
_a = encodings["input_ids"]
_a = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_a = []
_a = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(__a ) , __a ) ):
_a = min(start_index + batch_size , len(__a ) )
_a = encoded_texts[start_index:end_index]
_a = attn_masks[start_index:end_index]
if add_start_token:
_a = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__a )
_a = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_a = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__a ), attn_mask] , dim=1 )
_a = encoded_batch
with torch.no_grad():
_a = model(__a , attention_mask=__a ).logits
_a = out_logits[..., :-1, :].contiguous()
_a = labels[..., 1:].contiguous()
_a = attn_mask[..., 1:].contiguous()
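# The per-example perplexity computed below is exp of the mask-weighted mean token
# cross-entropy: exp( sum(CE * mask, dim=1) / sum(mask, dim=1) ) over the shifted sequence.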
_a = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , __a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__a )}
| 346 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 6008_5147_5143 ) -> int:
try:
_a = int(lowercase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
_a = 2
_a = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_a = i
while n % i == 0:
_a = n // i
i += 1
return int(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 346 | 1 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : List[Any] ) -> str:
_a = ""
for i in table:
res += inp[i - 1]
return res
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Optional[Any]:
return data[1:] + data[0]
def _lowerCamelCase ( lowercase : int , lowercase : Optional[Any] ) -> List[Any]:
_a = ""
for i in range(len(lowercase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[int] ) -> Optional[Any]:
_a = int("0b" + data[0] + data[-1] , 2 )
_a = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
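# S-box lookup convention (standard simplified DES): for a 4-bit block b0 b1 b2 b3,
# the row index is the 2-bit value (b0, b3) and the column index is (b1, b2).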
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : Any , lowercase : Dict ) -> Any:
_a = message[:4]
_a = message[4:]
_a = apply_table(lowercase , lowercase )
_a = xor(lowercase , lowercase )
_a = apply_sbox(lowercase , temp[:4] ) # noqa: E741
_a = apply_sbox(lowercase , temp[4:] )
_a = "0" * (2 - len(lowercase )) + l # noqa: E741
_a = "0" * (2 - len(lowercase )) + r
_a = apply_table(l + r , lowercase )
_a = xor(lowercase , lowercase )
return temp + right
if __name__ == "__main__":
lowerCAmelCase_ : str = input('Enter 10 bit key: ')
lowerCAmelCase_ : Dict = input('Enter 8 bit message: ')
lowerCAmelCase_ : Optional[int] = [6, 3, 7, 4, 8, 5, 10, 9]
lowerCAmelCase_ : List[str] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowerCAmelCase_ : int = [2, 4, 3, 1]
lowerCAmelCase_ : Tuple = [2, 6, 3, 1, 4, 8, 5, 7]
lowerCAmelCase_ : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6]
lowerCAmelCase_ : str = [4, 1, 2, 3, 2, 3, 4, 1]
lowerCAmelCase_ : str = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCAmelCase_ : str = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCAmelCase_ : Any = apply_table(key, paa_table)
lowerCAmelCase_ : List[Any] = temp[:5]
lowerCAmelCase_ : Any = temp[5:]
lowerCAmelCase_ : Optional[Any] = left_shift(left)
lowerCAmelCase_ : int = left_shift(right)
lowerCAmelCase_ : List[Any] = apply_table(left + right, pa_table)
lowerCAmelCase_ : Optional[Any] = left_shift(left)
lowerCAmelCase_ : Tuple = left_shift(right)
lowerCAmelCase_ : Tuple = left_shift(left)
lowerCAmelCase_ : int = left_shift(right)
lowerCAmelCase_ : Dict = apply_table(left + right, pa_table)
# encryption
lowerCAmelCase_ : List[Any] = apply_table(message, IP)
lowerCAmelCase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowerCAmelCase_ : str = temp[4:] + temp[:4]
lowerCAmelCase_ : Union[str, Any] = function(expansion, sa, sa, keya, temp)
lowerCAmelCase_ : Any = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
lowerCAmelCase_ : List[str] = apply_table(CT, IP)
lowerCAmelCase_ : List[Any] = function(expansion, sa, sa, keya, temp)
lowerCAmelCase_ : int = temp[4:] + temp[:4]
lowerCAmelCase_ : Dict = function(expansion, sa, sa, keya, temp)
lowerCAmelCase_ : int = apply_table(temp, IP_inv)
print('Plain text after decrypting is:', PT)
| 346 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
lowerCAmelCase_ : List[Any] = logging.getLogger(__name__)
lowerCAmelCase_ : List[Any] = {'facebook/bart-base': BartForConditionalGeneration}
lowerCAmelCase_ : int = {'facebook/bart-base': BartTokenizer}
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=lowercase , default=lowercase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=lowercase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=lowercase , default=lowercase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=lowercase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=lowercase , )
parser.add_argument(
"--config_name" , type=lowercase , default=lowercase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=lowercase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=lowercase , default=lowercase , help="Where to store the final ONNX file." )
_a = parser.parse_args()
return args
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple="cpu" ) -> Optional[Any]:
_a = model_dict[model_name].from_pretrained(lowercase ).to(lowercase )
_a = tokenizer_dict[model_name].from_pretrained(lowercase )
if model_name in ["facebook/bart-base"]:
_a = 0
_a = None
_a = 0
return huggingface_model, tokenizer
def _lowerCamelCase ( lowercase : List[str] , lowercase : Tuple , lowercase : int , lowercase : Any , lowercase : Dict ) -> Any:
model.eval()
_a = None
_a = torch.jit.script(BARTBeamSearchGenerator(lowercase ) )
with torch.no_grad():
_a = "My friends are cool but they eat too many carbs."
_a = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
_a = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=lowercase , max_length=lowercase , early_stopping=lowercase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowercase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowercase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=lowercase , )
logger.info("Model exported to {}".format(lowercase ) )
_a = remove_dup_initializers(os.path.abspath(lowercase ) )
logger.info("Deduplicated and optimized model written to {}".format(lowercase ) )
_a = onnxruntime.InferenceSession(lowercase )
_a = ort_sess.run(
lowercase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(lowercase ),
"max_length": np.array(lowercase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def _lowerCamelCase ( ) -> Any:
_a = parse_args()
_a = 5
_a = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_a = torch.device(args.device )
_a , _a = load_model_tokenizer(args.model_name_or_path , lowercase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(lowercase )
if args.max_length:
_a = args.max_length
if args.num_beams:
_a = args.num_beams
if args.output_file_path:
_a = args.output_file_path
else:
_a = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(lowercase , lowercase , lowercase , lowercase , lowercase )
if __name__ == "__main__":
main()
| 346 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCAmelCase_ : List[Any] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : Optional[Any]=None ) -> str:
if rng is None:
_a = random.Random()
_a = 1
for dim in shape:
total_dims *= dim
_a = []
for _ in range(lowercase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
_a = np.array(lowercase , dtype=jnp.intaa ).reshape(lowercase )
return output
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[str]=None ) -> int:
_a = ids_tensor(lowercase , vocab_size=2 , rng=lowercase )
# make sure that at least one token is attended to for each batch
_a = 1
return attn_mask
@require_flax
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =None
__a =()
def UpperCamelCase__ ( self : Optional[Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_a = 2
_a = inputs["input_ids"].shape[-1] // 2
_a = inputs["input_ids"][:max_batch_size, :sequence_length]
_a = jnp.ones_like(__a )
_a = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_a = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_a = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase__ ( self : List[str] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
_a = 0
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model_class.__name__[4:] # Skip the "Flax" at the beginning
_a = getattr(__a , __a )
_a = pt_model_class(__a ).eval()
_a = load_flax_weights_in_pytorch_model(__a , flax_model.params )
_a = flax_model.generate(__a ).sequences
_a = pt_model.generate(torch.tensor(__a , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase__ ( self : Optional[int] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = True
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : List[Any] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
_a = 2
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : Optional[int] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
_a = 2
_a = 2
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase__ ( self : Optional[int] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = True
_a = max_length
_a = 0.8
_a = 10
_a = 0.3
_a = 1
_a = 8
_a = 9
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : Any ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = max_length
_a = 1
_a = 8
_a = 9
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : int ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = max_length
_a = 2
_a = 1
_a = 8
_a = 9
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : Optional[int] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
# pad attention mask on the left
_a = attention_mask.at[(0, 0)].set(0 )
_a = False
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : Optional[int] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
# pad attention mask on the left
_a = attention_mask.at[(0, 0)].set(0 )
_a = True
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : List[Any] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
# pad attention mask on the left
_a = attention_mask.at[(0, 0)].set(0 )
_a = 2
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
_a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
_a = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
_a = "Hello world"
_a = tokenizer(__a , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__a , "do_samples" ):
model.generate(__a , do_samples=__a )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__a , "foo" ):
_a = {"foo": "bar"}
model.generate(__a , **__a )
| 346 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase_ : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
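# The pytest hooks below simply forward to shared helpers in transformers.testing_utils.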
def _lowerCamelCase ( lowercase : str ) -> Optional[int]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase )
def _lowerCamelCase ( lowercase : Dict ) -> str:
from transformers.testing_utils import pytest_terminal_summary_main
_a = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(lowercase , id=lowercase )
| 346 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='detr'
__a =['past_key_values']
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : str , __a : Dict=True , __a : Optional[Any]=None , __a : Dict=3 , __a : Any=1_00 , __a : List[str]=6 , __a : List[str]=20_48 , __a : Dict=8 , __a : Optional[Any]=6 , __a : Tuple=20_48 , __a : Optional[Any]=8 , __a : str=0.0 , __a : Union[str, Any]=0.0 , __a : Tuple=True , __a : Union[str, Any]="relu" , __a : List[Any]=2_56 , __a : int=0.1 , __a : List[str]=0.0 , __a : Optional[int]=0.0 , __a : Union[str, Any]=0.02 , __a : List[str]=1.0 , __a : Optional[Any]=False , __a : Union[str, Any]="sine" , __a : Any="resnet50" , __a : Union[str, Any]=True , __a : Any=False , __a : List[Any]=1 , __a : Dict=5 , __a : Optional[int]=2 , __a : List[Any]=1 , __a : Union[str, Any]=1 , __a : Tuple=5 , __a : Tuple=2 , __a : List[Any]=0.1 , **__a : Any , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__a , __a ):
_a = backbone_config.get("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__a )
# set timm attributes to None
_a , _a , _a = None, None, None
_a = use_timm_backbone
_a = backbone_config
_a = num_channels
_a = num_queries
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = encoder_layers
_a = auxiliary_loss
_a = position_embedding_type
_a = backbone
_a = use_pretrained_backbone
_a = dilation
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a )
@property
def UpperCamelCase__ ( self : str ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : str ):
return self.d_model
@classmethod
def UpperCamelCase__ ( cls : List[Any] , __a : PretrainedConfig , **__a : Union[str, Any] ):
return cls(backbone_config=__a , **__a )
def UpperCamelCase__ ( self : Any ):
_a = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
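# ONNX export config: declares the dynamic axes of pixel_values/pixel_mask, a 1e-5 validation tolerance, and opset 12.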
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =version.parse('1.11' )
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCamelCase__ ( self : str ):
return 1e-5
@property
def UpperCamelCase__ ( self : List[str] ):
return 12
| 346 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
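# T5-style encoder: token and position embeddings followed by a stack of T5 blocks and a final layer norm.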
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : List[Any] , __a : int , __a : int , __a : int , __a : float , __a : int , __a : int , __a : int , __a : int , __a : str , __a : bool = False , ):
super().__init__()
_a = nn.Embedding(__a , __a )
_a = nn.Embedding(__a , __a )
_a = False
_a = nn.Dropout(p=__a )
_a = TaConfig(
vocab_size=__a , d_model=__a , num_heads=__a , d_kv=__a , d_ff=__a , dropout_rate=__a , feed_forward_proj=__a , is_decoder=__a , is_encoder_decoder=__a , )
_a = nn.ModuleList()
for lyr_num in range(__a ):
_a = TaBlock(__a )
self.encoders.append(__a )
_a = TaLayerNorm(__a )
_a = nn.Dropout(p=__a )
def UpperCamelCase__ ( self : str , __a : Union[str, Any] , __a : Dict ):
_a = self.token_embedder(__a )
_a = encoder_input_tokens.shape[1]
_a = torch.arange(__a , device=encoder_input_tokens.device )
x += self.position_encoding(__a )
_a = self.dropout_pre(__a )
# invert the attention mask
_a = encoder_input_tokens.size()
_a = self.get_extended_attention_mask(__a , __a )
for lyr in self.encoders:
_a = lyr(__a , __a )[0]
_a = self.layer_norm(__a )
return self.dropout_post(__a ), encoder_inputs_mask
| 346 | 1 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _lowerCamelCase ( ) -> int:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_a = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , lowercase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _lowerCamelCase ( ) -> int:
assert _test_patching.open is open
_a = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , lowercase ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _lowerCamelCase ( ) -> Optional[Any]:
# pandas.read_csv is not present in _test_patching
_a = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , lowercase ):
pass
def _lowerCamelCase ( ) -> Tuple:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
_a = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , lowercase ) is None
with patch_submodule(_test_patching , "len" , lowercase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _lowerCamelCase ( ) -> Tuple:
_a = "__test_patch_submodule_start_and_stop_mock__"
_a = patch_submodule(_test_patching , "open" , lowercase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _lowerCamelCase ( ) -> Dict:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_a = "__test_patch_submodule_successive_join__"
_a = "__test_patch_submodule_successive_dirname__"
_a = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , lowercase ):
with patch_submodule(_test_patching , "os.rename" , lowercase ):
with patch_submodule(_test_patching , "os.path.dirname" , lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , lowercase ):
with patch_submodule(_test_patching , "os.path.join" , lowercase ):
with patch_submodule(_test_patching , "os.path.dirname" , lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _lowerCamelCase ( ) -> Optional[int]:
_a = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , lowercase ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , lowercase ):
pass
| 346 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _lowerCamelCase ( lowercase : Any ) -> Any:
_a = filter(lambda lowercase : p.requires_grad , model.parameters() )
_a = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase_ : List[str] = logging.getLogger(__name__)
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Union[str, Any]:
if metric == "rouge2":
_a = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
_a = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
_a = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
_a = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F'seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this'
" function." )
_a = ModelCheckpoint(
dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def _lowerCamelCase ( lowercase : Dict , lowercase : Dict ) -> str:
return EarlyStopping(
monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , )
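# Lightning callback: logs per-group learning rates and parameter counts, and writes test results/generations to the output dir.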
class __SCREAMING_SNAKE_CASE (pl.Callback ):
"""simple docstring"""
def UpperCamelCase__ ( self : Tuple , __a : Optional[int] , __a : Any ):
_a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__a )
@rank_zero_only
def UpperCamelCase__ ( self : Tuple , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Dict=True ):
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
_a = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
_a = Path(pl_module.hparams.output_dir )
if type_path == "test":
_a = od / "test_results.txt"
_a = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
_a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=__a )
generations_file.parent.mkdir(exist_ok=__a )
with open(__a , "a+" ) as writer:
for key in sorted(__a ):
if key in ["log", "progress_bar", "preds"]:
continue
_a = metrics[key]
if isinstance(__a , torch.Tensor ):
_a = val.item()
_a = f'{key}: {val:.6f}\n'
writer.write(__a )
if not save_generations:
return
if "preds" in metrics:
_a = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__a )
@rank_zero_only
def UpperCamelCase__ ( self : Any , __a : List[Any] , __a : Dict ):
try:
_a = pl_module.model.model.num_parameters()
except AttributeError:
_a = pl_module.model.num_parameters()
_a = count_trainable_parameters(__a )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__a , __a , "test" )
@rank_zero_only
def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : str ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 346 | 1 |
'''simple docstring'''
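# Exchange sort: for every position, swap in any later element that compares smaller; in-place, O(n^2) comparisons.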
def _lowerCamelCase ( lowercase : list[int] ) -> list[int]:
_a = len(lowercase )
for i in range(lowercase ):
for j in range(i + 1 , lowercase ):
if numbers[j] < numbers[i]:
_a , _a = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCAmelCase_ : str = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase_ : str = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 346 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
lowerCAmelCase_ : str = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
lowerCAmelCase_ : Union[str, Any] = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
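# Maps a [-1, 1] image tensor batch to [0, 1], moves channels last, and hands off to the numpy-to-PIL converter below.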
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = (images / 2 + 0.5).clamp(0 , 1 )
_a = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_a = numpy_to_pil(lowercase )
return images
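# Converts a numpy image batch with values in [0, 1] to a list of PIL images; single-channel images use mode "L".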
def _lowerCamelCase ( lowercase : int ) -> List[Any]:
if images.ndim == 3:
_a = images[None, ...]
_a = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
_a = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
_a = [Image.fromarray(lowercase ) for image in images]
return pil_images
| 346 | 1 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Any , __a : List[str] , __a : List[Any] ):
_a = name
_a = val
def __str__( self : Optional[Any] ):
return f'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self : Optional[int] , __a : Optional[int] ):
return self.val < other.val
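# Array-backed min-heap that also keeps an element-to-index map so decrease_key can locate nodes in O(1).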
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[Any] , __a : Optional[Any] ):
_a = {}
_a = {}
_a = self.build_heap(__a )
def __getitem__( self : Optional[Any] , __a : List[str] ):
return self.get_value(__a )
def UpperCamelCase__ ( self : str , __a : Any ):
return (idx - 1) // 2
def UpperCamelCase__ ( self : Tuple , __a : Union[str, Any] ):
return idx * 2 + 1
def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
return idx * 2 + 2
def UpperCamelCase__ ( self : List[Any] , __a : Dict ):
return self.heap_dict[key]
def UpperCamelCase__ ( self : Dict , __a : Any ):
_a = len(__a ) - 1
_a = self.get_parent_idx(__a )
for idx, i in enumerate(__a ):
_a = idx
_a = i.val
for i in range(__a , -1 , -1 ):
self.sift_down(__a , __a )
return array
def UpperCamelCase__ ( self : Optional[int] , __a : int , __a : Optional[Any] ):
while True:
_a = self.get_left_child_idx(__a ) # noqa: E741
_a = self.get_right_child_idx(__a )
_a = idx
if l < len(__a ) and array[l] < array[idx]:
_a = l
if r < len(__a ) and array[r] < array[smallest]:
_a = r
if smallest != idx:
_a , _a = array[smallest], array[idx]
_a , _a = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
_a = smallest
else:
break
def UpperCamelCase__ ( self : Any , __a : Optional[int] ):
_a = self.get_parent_idx(__a )
while p >= 0 and self.heap[p] > self.heap[idx]:
_a , _a = self.heap[idx], self.heap[p]
_a , _a = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
_a = p
_a = self.get_parent_idx(__a )
def UpperCamelCase__ ( self : Tuple ):
return self.heap[0]
def UpperCamelCase__ ( self : int ):
_a , _a = self.heap[-1], self.heap[0]
_a , _a = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
_a = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def UpperCamelCase__ ( self : Union[str, Any] , __a : Optional[Any] ):
self.heap.append(__a )
_a = len(self.heap ) - 1
_a = node.val
self.sift_up(len(self.heap ) - 1 )
def UpperCamelCase__ ( self : str ):
return len(self.heap ) == 0
def UpperCamelCase__ ( self : Optional[int] , __a : Any , __a : str ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
_a = new_value
_a = new_value
self.sift_up(self.idx_of_element[node] )
lowerCAmelCase_ : List[Any] = Node('R', -1)
lowerCAmelCase_ : Dict = Node('B', 6)
lowerCAmelCase_ : List[str] = Node('A', 3)
lowerCAmelCase_ : Tuple = Node('X', 1)
lowerCAmelCase_ : Union[str, Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
lowerCAmelCase_ : int = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
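# Session-scoped fixtures below create tiny datasets and data files in every supported format and compression scheme.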
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
_a = 10
_a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(lowercase ) ),
} , features=lowercase , )
return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
lowerCAmelCase_ : Union[str, Any] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.txt"
_a = FILE_CONTENT
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
import bza
_a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_a = bytes(lowercase , "utf-8" )
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_a = bytes(lowercase , "utf-8" )
with gzip.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Union[str, Any]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_a = bytes(lowercase , "utf-8" )
with lza.frame.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(lowercase , "w" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> Dict:
import tarfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any ) -> Union[str, Any]:
import lzma
_a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_a = bytes(lowercase , "utf-8" )
with lzma.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int , lowercase : Any ) -> Union[str, Any]:
import zipfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_a = bytes(lowercase , "utf-8" )
with zstd.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.xml"
_a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
lowerCAmelCase_ : Optional[int] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase_ : Dict = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase_ : Dict = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = datasets.Dataset.from_dict(lowercase )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> Dict:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(lowercase ) ) as con:
_a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> int:
import bza
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(lowercase , "rb" ) as f:
_a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any , lowercase : Any ) -> List[str]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(lowercase , "wb" ) as f:
_a = pq.ParquetWriter(lowercase , schema=lowercase )
_a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA_DICT_OF_LISTS}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> List[str]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> int:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Tuple:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> List[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> str:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : int , lowercase : List[Any] ) -> Optional[int]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : str ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : Any , lowercase : Optional[int] ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str , lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.join("nested" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> str:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict:
_a = ["0", "1", "2", "3"]
_a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(lowercase , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Union[str, Any] , lowercase : Any ) -> Optional[Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int , lowercase : str ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename("unsupported.ext" ) )
f.write(lowercase , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Any:
_a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[Any]:
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
_a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
| 346 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : str = logging.get_logger(__name__)
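# Renames one checkpoint key, shifting its block index down by the number of patch-embedding layers seen so far.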
def _lowerCamelCase ( lowercase : Tuple , lowercase : str , lowercase : Optional[int] , lowercase : str ) -> Union[str, Any]:
_a = original_name.split("." )[0]
_a = key.split("." )
_a = int(key_list[key_list.index(lowercase ) - 2] )
_a = int(key_list[key_list.index(lowercase ) - 1] )
_a = orig_block_num - offset
_a = key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
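# Rewrites every key of the original PoolFormer state dict into the Hugging Face naming scheme.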
def _lowerCamelCase ( lowercase : Tuple ) -> Optional[int]:
_a = OrderedDict()
_a , _a = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
_a = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
_a = key[: key.find("proj" )]
_a = key.replace(lowercase , F'patch_embeddings.{total_embed_found}.' )
_a = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
_a = "poolformer.encoder." + key
if "mlp.fc1" in key:
_a = replace_key_with_offset(lowercase , lowercase , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
_a = replace_key_with_offset(lowercase , lowercase , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
_a = replace_key_with_offset(lowercase , lowercase , "norm1" , "before_norm" )
if "norm2" in key:
_a = replace_key_with_offset(lowercase , lowercase , "norm2" , "after_norm" )
if "layer_scale_1" in key:
_a = replace_key_with_offset(lowercase , lowercase , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
_a = replace_key_with_offset(lowercase , lowercase , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
_a = key.replace("head" , "classifier" )
_a = value
return new_state_dict
def _lowerCamelCase ( ) -> Dict:
_a = "http://images.cocodataset.org/val2017/000000039769.jpg"
_a = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return image
@torch.no_grad()
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Tuple ) -> Optional[Any]:
_a = PoolFormerConfig()
# set attributes based on model_name
_a = "huggingface/label-files"
_a = model_name[-3:]
_a = 1000
_a = "imagenet-1k-id2label.json"
_a = (1, 1000)
# set config attributes
_a = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
_a = {int(lowercase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
if size == "s12":
_a = [2, 2, 6, 2]
_a = [64, 128, 320, 512]
_a = 4.0
_a = 0.9
elif size == "s24":
_a = [4, 4, 12, 4]
_a = [64, 128, 320, 512]
_a = 4.0
_a = 0.9
elif size == "s36":
_a = [6, 6, 18, 6]
_a = [64, 128, 320, 512]
_a = 4.0
_a = 1E-6
_a = 0.9
elif size == "m36":
_a = [6, 6, 18, 6]
_a = [96, 192, 384, 768]
_a = 4.0
_a = 1E-6
_a = 0.95
elif size == "m48":
_a = [8, 8, 24, 8]
_a = [96, 192, 384, 768]
_a = 4.0
_a = 1E-6
_a = 0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
_a = PoolFormerImageProcessor(crop_pct=lowercase )
# Prepare image
_a = prepare_img()
_a = image_processor(images=lowercase , return_tensors="pt" ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
_a = torch.load(lowercase , map_location=torch.device("cpu" ) )
# rename keys
_a = rename_keys(lowercase )
# create HuggingFace model and load state dict
_a = PoolFormerForImageClassification(lowercase )
model.load_state_dict(lowercase )
model.eval()
# Define image processor
_a = PoolFormerImageProcessor(crop_pct=lowercase )
_a = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
_a = model(lowercase )
_a = outputs.logits
# define expected logit slices for different models
if size == "s12":
_a = torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
elif size == "s24":
_a = torch.tensor([0.44_02, -0.13_74, -0.80_45] )
elif size == "s36":
_a = torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
elif size == "m36":
_a = torch.tensor([0.39_52, 0.22_63, -1.26_68] )
elif size == "m48":
_a = torch.tensor([0.11_67, -0.06_56, -0.34_23] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , lowercase , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase_ : List[str] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 346 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
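# Processor combining a LayoutLMv2 image processor (optionally running OCR) with a LayoutXLM tokenizer.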
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='LayoutLMv2ImageProcessor'
__a =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Dict , __a : int=None , __a : List[Any]=None , **__a : str ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Optional[int] , __a : Optional[Any] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Optional[Any] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
_a = self.image_processor(images=__a , return_tensors=__a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__a , __a ):
_a = [text] # add batch dimension (as the image processor always adds a batch dimension)
_a = features["words"]
_a = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel values
_a = features.pop("pixel_values" )
if return_overflowing_tokens is True:
_a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] )
_a = images
return encoded_inputs
def UpperCamelCase__ ( self : int , __a : List[Any] , __a : int ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__a ) != len(__a ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(__a )} and {len(__a )}' )
return images_with_overflow
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : Union[str, Any] , *__a : Optional[int] , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : int ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCamelCase__ ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : int ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 346 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCAmelCase_ : List[str] = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase_ : Union[str, Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase_ : List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
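# Fitness function: counts how many characters match the target at the same position.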
def _lowerCamelCase ( lowercase : str , lowercase : str ) -> tuple[str, float]:
_a = len([g for position, g in enumerate(lowercase ) if g == main_target[position]] )
return (item, float(lowercase ))
def _lowerCamelCase ( lowercase : str , lowercase : str ) -> tuple[str, str]:
_a = random.randint(0 , len(lowercase ) - 1 )
_a = parent_a[:random_slice] + parent_a[random_slice:]
_a = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _lowerCamelCase ( lowercase : str , lowercase : list[str] ) -> str:
_a = list(lowercase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_a = random.choice(lowercase )
return "".join(lowercase )
def _lowerCamelCase ( lowercase : tuple[str, float] , lowercase : list[tuple[str, float]] , lowercase : list[str] , ) -> list[str]:
_a = []
# Generate more children proportionally to the fitness score.
_a = int(parent_a[1] * 100 ) + 1
_a = 10 if child_n >= 10 else child_n
for _ in range(lowercase ):
_a = population_score[random.randint(0 , lowercase )][0]
_a , _a = crossover(parent_a[0] , lowercase )
# Append new string to the population list.
pop.append(mutate(lowercase , lowercase ) )
pop.append(mutate(lowercase , lowercase ) )
return pop
def _lowerCamelCase ( lowercase : str , lowercase : list[str] , lowercase : bool = True ) -> tuple[int, int, str]:
# Verify that N_POPULATION is bigger than N_SELECTED.
if N_POPULATION < N_SELECTED:
_a = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(lowercase )
# Verify that the target contains no genes besides the ones inside genes variable.
_a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_a = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(lowercase )
# Generate random starting population.
_a = []
for _ in range(lowercase ):
population.append("".join([random.choice(lowercase ) for i in range(len(lowercase ) )] ) )
# Just some logs to know what the algorithms is doing.
_a , _a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_a = [evaluate(lowercase , lowercase ) for item in population]
# Check if there is a matching evolution.
_a = sorted(lowercase , key=lambda lowercase : x[1] , reverse=lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is the selection step.
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation , population , target = basic(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
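    # A minimal extra sketch of the crossover step in isolation; the cut point
    # is random, so the printed children are only one possible outcome.
    child_a , child_b = crossover("AAAAAA" , "BBBBBB" )
    print(child_a , child_b )  # e.g. AABBBB BBAAAA for a cut at index 2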
| 346 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =MAX_MODEL_INPUT_SIZES
__a =['input_ids', 'attention_mask']
__a =[]
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any="<s>" , __a : List[str]="</s>" , __a : str="<pad>" , __a : List[str]="<unk>" , __a : Union[str, Any]=False , __a : Any=False , __a : List[str]=None , __a : Optional[int]=None , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = do_upper_case
_a = do_lower_case
_a = load_json(__a )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(__a , self.sp_model_kwargs )
if lang_codes is not None:
_a = lang_codes
_a = LANGUAGES[lang_codes]
_a = [f'<lang:{lang}>' for lang in self.langs]
_a = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
_a = self.lang_tokens
_a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_a = {}
@property
def UpperCamelCase__ ( self : str ):
return len(self.encoder )
@property
def UpperCamelCase__ ( self : str ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
_a = new_tgt_lang
self.set_tgt_lang_special_tokens(__a )
def UpperCamelCase__ ( self : List[Any] , __a : str ):
_a = self.lang_code_to_id[tgt_lang]
_a = [lang_code_id]
def UpperCamelCase__ ( self : Dict , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : List[str] , __a : Any ):
return self.encoder.get(__a , self.encoder[self.unk_token] )
def UpperCamelCase__ ( self : str , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : str , __a : List[str] ):
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_a = self.sp_model.decode(__a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_a = []
else:
current_sub_tokens.append(__a )
_a = self.sp_model.decode(__a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self : int , __a : Any , __a : int=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_a = [1] * len(self.prefix_tokens )
_a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self : str , __a : Dict ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
_a = Path(__a )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ) -> Union[Dict, List]:
    with open(path , "r" ) as f:
        return json.load(f )
def save_json( data : Any , path : str ) -> None:
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
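# A minimal standalone sketch of the prefix/eos wrapping the tokenizer applies
# when building model inputs above; all ids below are made up for illustration.
prefix_tokens = [9]   # hypothetical <lang:fr> id
eos_token_id = 2      # hypothetical </s> id
token_ids = [5, 6, 7]
print(prefix_tokens + token_ids + [eos_token_id])  # [9, 5, 6, 7, 2]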
| 346 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
    from transformers import LevitImageProcessor
class LevitImageProcessingTester (unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , __a : Tuple , __a : Tuple=7 , __a : Optional[Any]=3 , __a : Tuple=18 , __a : Optional[Any]=30 , __a : Any=4_00 , __a : Any=True , __a : Dict=None , __a : str=True , __a : Optional[Any]=None , __a : str=True , __a : Any=[0.5, 0.5, 0.5] , __a : Union[str, Any]=[0.5, 0.5, 0.5] , ):
_a = size if size is not None else {"shortest_edge": 18}
_a = crop_size if crop_size is not None else {"height": 18, "width": 18}
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_center_crop
_a = crop_size
_a = do_normalize
_a = image_mean
_a = image_std
def UpperCamelCase__ ( self : int ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =LevitImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self : int ):
_a = LevitImageProcessingTester(self )
@property
def UpperCamelCase__ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self : int ):
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "image_mean" ) )
self.assertTrue(hasattr(__a , "image_std" ) )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_resize" ) )
self.assertTrue(hasattr(__a , "do_center_crop" ) )
self.assertTrue(hasattr(__a , "size" ) )
def UpperCamelCase__ ( self : int ):
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase__ ( self : Optional[int] ):
pass
def UpperCamelCase__ ( self : Optional[Any] ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase__ ( self : Tuple ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase__ ( self : Dict ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_a = image_processing(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
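# A minimal sketch of the resize-then-crop geometry the shape assertions above
# rely on: shortest-edge resize to 18 followed by an 18x18 center crop (pure
# arithmetic; the processor's exact rounding may differ).
h, w, shortest_edge, crop = 30, 40, 18, 18
scale = shortest_edge / min(h, w)
print((round(h * scale), round(w * scale)), (crop, crop))  # (18, 24) (18, 18)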
| 346 |
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_a = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(__a ):
_a = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
| 346 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['pixel_values']
def __init__( self : Tuple , __a : bool = True , __a : int = 32 , __a : int=PILImageResampling.BILINEAR , __a : bool = True , **__a : List[str] , ):
_a = do_resize
_a = do_rescale
_a = size_divisor
_a = resample
super().__init__(**__a )
def UpperCamelCase__ ( self : List[str] , __a : np.ndarray , __a : int , __a : Tuple , __a : Optional[ChannelDimension] = None , **__a : Tuple ):
_a , _a = get_image_size(__a )
# Rounds the height and width down to the closest multiple of size_divisor
_a = height // size_divisor * size_divisor
_a = width // size_divisor * size_divisor
_a = resize(__a , (new_h, new_w) , resample=__a , data_format=__a , **__a )
return image
def UpperCamelCase__ ( self : Optional[Any] , __a : np.ndarray , __a : float , __a : Optional[ChannelDimension] = None , **__a : int ):
return rescale(image=__a , scale=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self : Dict , __a : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __a : Optional[bool] = None , __a : Optional[int] = None , __a : List[str]=None , __a : Optional[bool] = None , __a : Optional[Union[TensorType, str]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Any , ):
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = size_divisor if size_divisor is not None else self.size_divisor
_a = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
_a = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
_a = [to_numpy_array(__a ) for img in images]
if do_resize:
_a = [self.resize(__a , size_divisor=__a , resample=__a ) for image in images]
if do_rescale:
_a = [self.rescale(__a , scale=1 / 2_55 ) for image in images]
_a = [to_channel_dimension_format(__a , __a ) for image in images]
_a = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
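# A minimal sketch of the size_divisor rounding performed in the resize step
# above: height and width are floored to the nearest multiple of the divisor.
height, width, size_divisor = 37, 50, 32
print(height // size_divisor * size_divisor, width // size_divisor * size_divisor)  # 32 32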
| 346 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results( test_results : str ) -> Optional[int]:
    expressions = test_results.split(" " )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def extract_first_line_failure( failures_short_lines : str ) -> Optional[Any]:
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n" ):
        if re.search(r"_ \[doctest\]" , line ):
            in_error = True
            file = line.split(" " )[2]
        elif in_error and not line.split(" " )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message :
"""simple docstring"""
    def __init__( self , title : str , doc_test_results : Dict ):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split("," )[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time( self ):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return f'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
    def header( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures( self ):
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ""
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload( self ):
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def UpperCamelCase__ ( ):
_a = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
    def post( self ):
        print("Sending the following payload" )
        print(json.dumps({"blocks": json.loads(self.payload )} ) )
        text = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=text , )
    def get_reply_blocks( self , job_name , job_link , failures , text ):
_a = ""
for key, value in failures.items():
_a = value[:2_00] + " [Truncated]" if len(__a ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
_a = job_name
_a = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
_a = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self ):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made." )
        job_link = self.doc_test_results.pop("job_link" )
        self.doc_test_results.pop("failures" )
        self.doc_test_results.pop("success" )
        self.doc_test_results.pop("time_spent" )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["failures"] ):
                text = f'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print("Sending the following reply" )
                print(json.dumps({"blocks": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts["ts"] , )
                time.sleep(1 )
def get_job_links( ) -> Dict:
    run_id = os.environ["GITHUB_RUN_ID"]
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' ).json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links." , e )
    return {}
def retrieve_artifact( name : str ) -> Dict:
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding="utf-8" ) as f:
                    _artifact[file.split("." )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(name , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts( ) -> Dict:
    class Artifact:
        """simple docstring"""
        def __init__( self , name : str ):
            self.name = name
            self.paths = []
        def __str__( self ):
            return self.name
        def add_path( self , path : str ):
            self.paths.append({"name": self.name, "path": path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ('*.py', 'API Examples'),
            ('*.md', 'MD Examples'),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            'failed': [],
            'failures': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
    if "stats" in artifact:
        failed , success , time_spent = handle_test_results(artifact['stats'])
        doc_test_results['failures'] = failed
        doc_test_results['success'] = success
        doc_test_results['time_spent'] = time_spent[1:-1] + ', '
        all_failures = extract_first_line_failure(artifact['failures_short'])
        for line in artifact["summary_short"].split('\n'):
            if re.search('FAILED', line):
                line = line.replace('FAILED ', '')
                line = line.split()[0].replace('\n', '')
                if "::" in line:
                    file_path , test = line.split('::')
                else:
                    file_path , test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]['failures'][test] = failure
                        break
    message = Message('🤗 Results of the doc tests.', doc_test_results)
    message.post()
    message.post_reply()
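# A minimal sketch of what handle_test_results extracts from a made-up pytest
# summary string (shown as a comment, since the block above needs CI secrets):
#     handle_test_results("= 2 failed, 10 passed in 0:01:23 =")
#     -> (2, 10, '0:01:23')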
| 346 | 1 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name : str , checkpoint_name : str , dump_path : str , force_download : bool ) -> None:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
_a = TOKENIZER_CLASSES
else:
_a = {tokenizer_name: getattr(lowercase , tokenizer_name + "Fast" )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
_a = TOKENIZER_CLASSES[tokenizer_name]
_a = True
if checkpoint_name is None:
_a = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_a = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
_a = tokenizer_class.from_pretrained(lowercase , force_download=lowercase )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
_a , _a = checkpoint.split("/" )
_a = os.path.join(lowercase , lowercase )
elif add_prefix:
_a = checkpoint
_a = dump_path
else:
_a = None
_a = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_a = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_a = file_path.split(lowercase )[-1][0]
if next_char == "/":
_a = os.path.join(lowercase , lowercase )
_a = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
_a = tokenizer.save_pretrained(
lowercase , legacy_format=lowercase , filename_prefix=lowercase )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(lowercase )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
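    # Example invocation (hypothetical checkpoint and output path):
    #   python convert_slow_tokenizers_checkpoints_to_fast.py \
    #       --tokenizer_name BertTokenizer \
    #       --checkpoint_name bert-base-uncased \
    #       --dump_path ./fast_tokenizers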
| 346 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main( ) -> None:
_a = HfArgumentParser(lowercase )
_a = parser.parse_args_into_dataclasses()[0]
_a = TensorFlowBenchmark(args=lowercase )
try:
_a = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_a = "Arg --no_{0} is no longer used, please use --no-{0} instead."
_a = " ".join(str(lowercase ).split(" " )[:-1] )
_a = ""
_a = eval(str(lowercase ).split(" " )[-1] )
_a = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase )
if len(lowercase ) > 0:
_a = full_error_msg + begin_error_msg + str(lowercase )
raise ValueError(lowercase )
benchmark.run()
if __name__ == "__main__":
    main()
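# A minimal sketch of the deprecation message assembled in main() above:
#     "Arg --no_{0} is no longer used, please use --no-{0} instead.".format("multi_process")
#     -> 'Arg --no_multi_process is no longer used, please use --no-multi_process instead.'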
| 346 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : str = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='deberta-v2'
def __init__( self : Dict , __a : List[str]=12_81_00 , __a : int=15_36 , __a : Tuple=24 , __a : Dict=24 , __a : Any=61_44 , __a : Optional[Any]="gelu" , __a : int=0.1 , __a : Any=0.1 , __a : Optional[Any]=5_12 , __a : int=0 , __a : Tuple=0.02 , __a : Tuple=1e-7 , __a : List[str]=False , __a : Tuple=-1 , __a : Dict=0 , __a : Tuple=True , __a : Dict=None , __a : int=0 , __a : Tuple="gelu" , **__a : Optional[Any] , ):
super().__init__(**__a )
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = relative_attention
_a = max_relative_positions
_a = pad_token_id
_a = position_biased_input
# Backwards compatibility
if type(__a ) == str:
_a = [x.strip() for x in pos_att_type.lower().split("|" )]
_a = pos_att_type
_a = vocab_size
_a = layer_norm_eps
_a = kwargs.get("pooler_hidden_size" , __a )
_a = pooler_dropout
_a = pooler_hidden_act
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Tuple ):
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def UpperCamelCase__ ( self : List[Any] ):
return 12
def UpperCamelCase__ ( self : Tuple , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , __a : "PreTrainedTokenizerBase" = None , ):
_a = super().generate_dummy_inputs(preprocessor=__a , framework=__a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
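# A minimal sketch of the backwards-compatible pos_att_type parsing in the
# config __init__ above:
#     "C2P|P2C" -> [x.strip() for x in "C2P|P2C".lower().split("|")] == ['c2p', 'p2c']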
| 346 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]
__version__ = '3.0.12'
_logger = None
def _lowerCamelCase ( ) -> Optional[int]:
global _logger
_a = _logger or logging.getLogger(__name__ )
return _logger
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Dict , __a : Optional[Any] ):
_a = lock_file
return None
def __str__( self : Any ):
_a = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[Any] , __a : Optional[int] ):
_a = lock
return None
def __enter__( self : str ):
return self.lock
def __exit__( self : List[Any] , __a : List[Any] , __a : Union[str, Any] , __a : Dict ):
self.lock.release()
return None
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : Union[str, Any] , __a : Optional[int]=-1 , __a : Tuple=None ):
_a = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
_a = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
_a = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_a = None
# The default timeout value.
_a = timeout
# We use this lock primarily for the lock counter.
_a = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_a = 0
return None
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file
@property
def UpperCamelCase__ ( self : List[Any] ):
return self._timeout
@timeout.setter
def UpperCamelCase__ ( self : int , __a : List[Any] ):
_a = float(__a )
return None
def UpperCamelCase__ ( self : Dict ):
raise NotImplementedError()
def UpperCamelCase__ ( self : str ):
raise NotImplementedError()
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file_fd is not None
def UpperCamelCase__ ( self : int , __a : int=None , __a : Tuple=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_a = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a = id(self )
_a = self._lock_file
_a = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_a = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCamelCase__ ( self : Union[str, Any] , __a : int=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_a = id(self )
_a = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
_a = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self : List[Any] ):
self.acquire()
return self
def __exit__( self : str , __a : str , __a : Dict , __a : Dict ):
self.release()
return None
def __del__( self : int ):
self.release(force=__a )
return None
def UpperCamelCase__ ( self : Tuple , __a : str , __a : int ):
_a = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
_a = os.path.dirname(__a )
_a = str(hash(__a ) )
_a = filename[: max_length - len(__a ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(__a , __a )
else:
return path
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : int , __a : str , __a : List[Any]=-1 , __a : List[Any]=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
_a = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def UpperCamelCase__ ( self : int ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self._lock_file_fd
_a = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , __a : Optional[Any] , __a : Union[str, Any]=-1 , __a : int=None ):
_a = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def UpperCamelCase__ ( self : Any ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_a = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Tuple ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_a = self._lock_file_fd
_a = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
_a = fd
return None
def UpperCamelCase__ ( self : Union[str, Any] ):
os.close(self._lock_file_fd )
_a = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn('only soft file lock is available')
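# A minimal usage sketch (comments only; the class names above are anonymized).
# With the py-filelock API this module mirrors, usage looks like:
#     lock = FileLock("demo.txt.lock", timeout=5)
#     with lock:
#         with lock:       # re-entrant: only the internal counter is bumped
#             ...          # exclusive critical section
#     # released on exit of the outermost with-block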
| 346 | 1 |
'''simple docstring'''
import math
def is_prime( number : int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime( value : int , factor : int = 1 , **kwargs ) -> int:
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
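if __name__ == "__main__":
    # Small sanity checks for the helpers above.
    print(is_prime(29 ) )                 # True
    print(next_prime(10 ) )               # 11, walking upward from 10
    print(next_prime(14 , desc=True ) )   # 13, walking downward when desc=True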
| 346 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42
__a =42
__a =42
__a =42
__a =42
def UpperCamelCase__ ( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self : List[str] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(__a , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase__ ( self : List[Any] ):
_a , *_a = self.shape
_a = int(np.prod(__a ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(__a )
_a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ):
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(__a , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(__a , -1 , 2 )
_a = (
self.z.view(__a , 1 , 3 )
+ self.x.view(__a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=__a )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__a , *__a , 2 , 3 )
def UpperCamelCase__ ( self : Dict , __a : int , __a : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase ( lowercase : int ) -> DifferentiableProjectiveCamera:
_a = []
_a = []
_a = []
_a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_a = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a = -z * 4
_a = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] )
_a = np.cross(lowercase , lowercase )
origins.append(lowercase )
xs.append(lowercase )
ys.append(lowercase )
zs.append(lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
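# A minimal sketch of the pixel -> [-1, 1] fraction mapping used when building
# camera rays above, using the numpy import already at the top of this module:
res = 4
print((np.arange(res ) / (res - 1)) * 2 - 1)  # [-1., -0.333..., 0.333..., 1.]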
| 346 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 346 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ : List[str] = TypeVar('T')
lowerCAmelCase_ : Dict = TypeVar('U')
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : T | None , __a : U | None ):
_a = key
_a = val
_a = None
_a = None
def __repr__( self : Any ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Dict ):
_a = DoubleLinkedListNode(__a , __a )
_a = DoubleLinkedListNode(__a , __a )
_a , _a = self.rear, self.head
def __repr__( self : str ):
_a = ["DoubleLinkedList"]
_a = self.head
while node.next is not None:
rep.append(str(__a ) )
_a = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__a )
def UpperCamelCase__ ( self : int , __a : DoubleLinkedListNode[T, U] ):
_a = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_a = node
_a = previous
_a = node
_a = self.rear
def UpperCamelCase__ ( self : Any , __a : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
_a = node.next
_a = node.prev
_a = None
_a = None
return node
class __SCREAMING_SNAKE_CASE (Generic[T, U] ):
"""simple docstring"""
__a ={}
def __init__( self : Union[str, Any] , __a : int ):
_a = DoubleLinkedList()
_a = capacity
_a = 0
_a = 0
_a = 0
_a = {}
def __repr__( self : Optional[int] ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : str , __a : T ):
return key in self.cache
def UpperCamelCase__ ( self : str , __a : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_a = self.cache[key]
_a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__a )
return node.val
self.miss += 1
return None
def UpperCamelCase__ ( self : Tuple , __a : T , __a : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_a = DoubleLinkedListNode(__a , __a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_a = value
self.list.add(__a )
@classmethod
def UpperCamelCase__ ( cls : Tuple , __a : int = 1_28 ):
def cache_decorator_inner(__a : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*__a : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_a = LRUCache(__a )
_a = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_a = func(*__a )
cls.decorator_function_to_instance_map[func].put(args[0] , __a )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__a , "cache_info" , __a ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
    import doctest
    doctest.testmod()
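# A minimal sketch of the same least-recently-used behaviour via the stdlib
# analogue functools.lru_cache (the cache class above implements it by hand):
if __name__ == "__main__":
    from functools import lru_cache
    @lru_cache(maxsize=2 )
    def square(x : int ) -> int:
        return x * x
    square(1 ); square(2 ); square(1 ); square(3 )  # 2 is evicted as least recently used
    print(square.cache_info() )  # CacheInfo(hits=1, misses=3, maxsize=2, currsize=2)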
| 346 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func : Callable , y0 : float , x0 : float , step_size : float , x_end : float ) -> np.array:
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        y_get = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_get ))
        )
        x += step_size
    return y
if __name__ == "__main__":
    import doctest
    doctest.testmod()
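# One illustrative run of the solver above: y' = y with y(0) = 1 over [0, 0.1];
# a single modified-Euler (Heun) step lands near exp(0.1) ~ 1.10517.
if __name__ == "__main__":
    print(euler_modified(lambda x, y: y , 1.0 , 0.0 , 0.1 , 0.1 ) )  # [1.    1.105]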
| 346 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence( x : str ) -> str:
    x = re.sub("<n>" , "" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 346 | 1 |
'''simple docstring'''
import math
def decimal_to_octal( num : int ) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F'0o{int(octal )}'
def main( ) -> None:
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
    main()
| 346 |
'''simple docstring'''
import requests
giphy_api_key = 'YOUR API KEY'
def get_gifs( query : str , api_key : str = giphy_api_key ) -> list:
    formatted_query = "+".join(query.split() )
    url = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
    gifs = requests.get(url ).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
    print('\n'.join(get_gifs('space ship')))
| 346 | 1 |
'''simple docstring'''
cache : dict[tuple[int, int, int], int] = {}
def _calculate( days : int , absent : int , late : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution( days : int = 30 ) -> int:
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
    print(solution())
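    # Small sanity checks: 1 day -> 3 valid strings (O, L, A); 2 days -> 8 of
    # the 9 pairs (only "AA" breaks the rules); 4 days -> 43.
    print(solution(1 ) , solution(2 ) , solution(4 ) )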
| 346 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BertGenerationTokenizer
__a =False
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<s>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[str] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__a ) , 10_02 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase__ ( self : Tuple ):
_a = BertGenerationTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_85, 46, 10, 1_70, 3_82] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase__ ( self : Any ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def UpperCamelCase__ ( self : List[str] ):
_a = "Hello World!"
_a = [1_85_36, 22_60, 1_01]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_a = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def UpperCamelCase__ ( self : Tuple ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_a = list(self.big_tokenizer.get_vocab().keys() )[:10]
_a = " ".join(__a )
_a = self.big_tokenizer.encode_plus(__a , return_tensors="pt" , return_token_type_ids=__a )
_a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__a )
_a = BertGenerationConfig()
_a = BertGenerationEncoder(__a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
# fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346 | 1 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 346 |
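Because the module hands the function to fire.Fire, the two positionals map to config_name and save_dir, and any extra flags flow into config_kwargs as config overrides; the script filename and the override below are illustrative assumptions:

python save_randomly_initialized_version.py t5-small ./t5-small-rand --d_ff 256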
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list) -> int:
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list) -> int:
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list) -> int:
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list) -> int:
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list) -> None:
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main() -> None:
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 346 | 1 |
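A worked example against the classic CLRS price table (the table is an illustrative assumption, not part of the source): a rod of length 4 is best cut into two length-2 pieces, since 5 + 5 = 10 beats selling it whole (9) or cutting 1 + 3 (1 + 8 = 9).

prices = [1, 5, 8, 9, 10, 17, 17, 20]
assert bottom_up_cut_rod(4, prices) == 10
assert top_down_cut_rod(4, prices) == 10
assert naive_cut_rod_recursive(4, prices) == 10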
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (BaseImageProcessor ):
"""simple docstring"""
__a =['pixel_values']
def __init__( self : int , __a : bool = True , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : bool = True , __a : Union[int, float] = 1 / 2_55 , __a : Dict[str, int] = None , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[Any] , ):
super().__init__(**__a )
_a = size if size is not None else {"height": 2_24, "width": 2_24}
_a = get_size_dict(__a )
_a = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
_a = get_size_dict(__a , default_to_square=__a , param_name="crop_size" )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self : Optional[int] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ):
_a = get_size_dict(__a )
if "shortest_edge" in size:
_a = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size["height"], size["width"])
else:
raise ValueError(f'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ):
_a = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def UpperCamelCase__ ( self : int , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self : List[str] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ):
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self : str , __a : ImageInput , __a : Optional[bool] = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ):
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__a , param_name="crop_size" , default_to_square=__a )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(__a )
if not is_batched(__a ):
_a = [images]
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
_a = [to_numpy_array(__a ) for image in images]
if do_resize:
_a = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
_a = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
_a = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
_a = [to_channel_dimension_format(__a , __a ) for image in images]
_a = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
| 346 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __SCREAMING_SNAKE_CASE (Pipeline ):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Accept an iterable of {"image": ..., "question": ...} dicts as-is
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 346 | 1 |
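A minimal usage sketch for the pipeline class above; the checkpoint id is illustrative (a commonly used public VQA model) and downloading it is assumed to succeed:

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
    top_k=2,
)
print(preds)  # a list of {"score": ..., "answer": ...} dicts, highest score first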
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 346 |
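Given the two positional arguments registered above, a conversion run looks like the following; the script filename and the paths are assumptions for illustration:

python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/m2m100/model.pt ./m2m100-hf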
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 1 |
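A small end-to-end run of the Nagel-Schreckenberg rules above; the parameter values are arbitrary choices for illustration:

highway = construct_highway(number_of_cells=21, frequency=3, initial_speed=1)
history = simulate(highway, number_of_update=4, probability=0.2, max_speed=5)
for row in history:
    print(row)  # -1 marks an empty cell, any other value is that car's speed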
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def _lowerCamelCase ( lowercase : int ) -> Any:
_a = EfficientNetConfig()
_a = CONFIG_MAP[model_name]["hidden_dim"]
_a = CONFIG_MAP[model_name]["width_coef"]
_a = CONFIG_MAP[model_name]["depth_coef"]
_a = CONFIG_MAP[model_name]["image_size"]
_a = CONFIG_MAP[model_name]["dropout_rate"]
_a = CONFIG_MAP[model_name]["dw_padding"]
_a = "huggingface/label-files"
_a = "imagenet-1k-id2label.json"
_a = 1000
_a = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
_a = {int(lowercase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase ( ) -> Any:
_a = "http://images.cocodataset.org/val2017/000000039769.jpg"
_a = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = CONFIG_MAP[model_name]["image_size"]
_a = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=lowercase , )
return preprocessor
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> List[Any]:
_a = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
_a = sorted(set(lowercase ) )
_a = len(lowercase )
_a = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )}
_a = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
_a = block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
_a = {}
for item in rename_keys:
if item[0] in original_param_names:
_a = "efficientnet." + item[1]
_a = "classifier.weight"
_a = "classifier.bias"
return key_mapping
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : str ) -> Optional[int]:
for key, value in tf_params.items():
if "normalization" in key:
continue
_a = key_mapping[key]
if "_conv" in key and "kernel" in key:
_a = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
_a = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
_a = torch.from_numpy(np.transpose(lowercase ) )
else:
_a = torch.from_numpy(lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
@torch.no_grad()
def _lowerCamelCase ( lowercase : Any , lowercase : List[str] , lowercase : int , lowercase : List[Any] ) -> Optional[Any]:
_a = model_classes[model_name](
include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , )
_a = original_model.trainable_variables
_a = original_model.non_trainable_variables
_a = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
_a = param.numpy()
_a = list(tf_params.keys() )
# Load HuggingFace model
_a = get_efficientnet_config(lowercase )
_a = EfficientNetForImageClassification(lowercase ).eval()
_a = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
_a = rename_keys(lowercase )
replace_params(lowercase , lowercase , lowercase )
# Initialize preprocessor and preprocess input image
_a = convert_image_processor(lowercase )
_a = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
_a = hf_model(**lowercase )
_a = outputs.logits.detach().numpy()
# Original model inference
_a = False
_a = CONFIG_MAP[model_name]["image_size"]
_a = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
_a = image.img_to_array(lowercase )
_a = np.expand_dims(lowercase , axis=0 )
_a = original_model.predict(lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase ):
os.mkdir(lowercase )
# Save converted model and image processor
hf_model.save_pretrained(lowercase )
preprocessor.save_pretrained(lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F'Pushing converted {model_name} to the hub...' )
_a = F'efficientnet-{model_name}'
preprocessor.push_to_hub(lowercase )
hf_model.push_to_hub(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
lowerCAmelCase_ : List[Any] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 346 |
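Reading the argparse block above, a typical conversion run looks like this; the script filename is an assumption, and the Keras weights are downloaded on first use:

python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model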
'''simple docstring'''
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
| 346 | 1 |
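The heavy lifting above is done by Python's three-argument pow, which performs modular exponentiation by repeated squaring, so intermediate values never exceed the modulus even though 2**7830457 has over two million digits. The same identity on a small case:

assert pow(2, 20, 10**5) == (2**20) % 10**5 == 48576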
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : str = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
"""simple docstring"""
__a ='encodec'
def __init__( self : int , __a : str=[1.5, 3.0, 6.0, 12.0, 24.0] , __a : Optional[int]=2_40_00 , __a : str=1 , __a : List[str]=False , __a : Union[str, Any]=None , __a : List[str]=None , __a : str=1_28 , __a : Any=32 , __a : Dict=1 , __a : Tuple=[8, 5, 4, 2] , __a : List[str]="weight_norm" , __a : List[Any]=7 , __a : Any=7 , __a : List[Any]=3 , __a : Optional[int]=2 , __a : List[Any]=True , __a : Optional[Any]="reflect" , __a : Union[str, Any]=2 , __a : Optional[int]=2 , __a : Union[str, Any]=1.0 , __a : Any=10_24 , __a : Tuple=None , __a : Optional[Any]=True , **__a : Dict , ):
_a = target_bandwidths
_a = sampling_rate
_a = audio_channels
_a = normalize
_a = chunk_length_s
_a = overlap
_a = hidden_size
_a = num_filters
_a = num_residual_layers
_a = upsampling_ratios
_a = norm_type
_a = kernel_size
_a = last_kernel_size
_a = residual_kernel_size
_a = dilation_growth_rate
_a = use_causal_conv
_a = pad_mode
_a = compress
_a = num_lstm_layers
_a = trim_right_ratio
_a = codebook_size
_a = codebook_dim if codebook_dim is not None else hidden_size
_a = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
super().__init__(**__a )
@property
def UpperCamelCase__ ( self : Dict ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase__ ( self : Dict ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCamelCase__ ( self : Any ):
_a = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 346 |
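Plugging the signature defaults above into the derived properties: the hop length is the product of the upsampling ratios, 8 * 5 * 4 * 2 = 320 samples, so at the default 24 kHz sampling rate the frame_rate property evaluates to ceil(24000 / 320) = 75 frames per second.

import math

assert math.ceil(24_000 / (8 * 5 * 4 * 2)) == 75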
'''simple docstring'''
def solution(n: int = 600_851_475_143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 346 | 1 |
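Trial division above strips each prime factor completely before moving on, so the last divisor recorded is the largest prime factor. The Project Euler 3 statement supplies a convenient test case:

assert solution(13195) == 29  # 13195 = 5 * 7 * 13 * 29
assert solution(17) == 17     # a prime is its own largest prime factor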
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowerCAmelCase_ : List[str] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
lowerCAmelCase_ : Optional[int] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
lowerCAmelCase_ : Tuple = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
lowerCAmelCase_ : Tuple = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
lowerCAmelCase_ : List[str] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE (datasets.Metric ):
"""simple docstring"""
def UpperCamelCase__ ( self : List[str] ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def UpperCamelCase__ ( self : str , __a : List[str] , __a : List[str] , __a : Any=[1, 10, 1_00] , __a : str=4 , __a : Any=3.0 ):
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=__a ) as executor:
_a = []
_a = Counter()
_a = 0
_a = defaultdict(__a )
for task_id, (candidates, test_case) in enumerate(zip(__a , __a ) ):
for candidate in candidates:
_a = candidate + "\n" + test_case
_a = (test_program, timeout, task_id, completion_id[task_id])
_a = executor.submit(__a , *__a )
futures.append(__a )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__a ):
_a = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
_a , _a = [], []
for result in results.values():
result.sort()
_a = [r[1]["passed"] for r in result]
total.append(len(__a ) )
correct.append(sum(__a ) )
_a = np.array(__a )
_a = np.array(__a )
_a = k
_a = {f'pass@{k}': estimate_pass_at_k(__a , __a , __a ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 346 |
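The estimator above computes the unbiased pass@k from the HumanEval paper, pass@k = 1 - C(n - c, k) / C(n, k), using the numerically stable product form. Reproducing the docstring example (two candidates, one passing):

import numpy as np

assert float(estimate_pass_at_k(2, [1], 1)[0]) == 0.5  # 1 - C(1,1)/C(2,1)
assert float(estimate_pass_at_k(2, [1], 2)[0]) == 1.0  # n - c < k, guaranteed hit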
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=lowercase , default=lowercase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=lowercase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=lowercase , default=lowercase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=lowercase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=lowercase , )
parser.add_argument(
"--config_name" , type=lowercase , default=lowercase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=lowercase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=lowercase , default=lowercase , help="Where to store the final ONNX file." )
_a = parser.parse_args()
return args
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple="cpu" ) -> Optional[Any]:
_a = model_dict[model_name].from_pretrained(lowercase ).to(lowercase )
_a = tokenizer_dict[model_name].from_pretrained(lowercase )
if model_name in ["facebook/bart-base"]:
_a = 0
_a = None
_a = 0
return huggingface_model, tokenizer
def _lowerCamelCase ( lowercase : List[str] , lowercase : Tuple , lowercase : int , lowercase : Any , lowercase : Dict ) -> Any:
model.eval()
_a = None
_a = torch.jit.script(BARTBeamSearchGenerator(lowercase ) )
with torch.no_grad():
_a = "My friends are cool but they eat too many carbs."
_a = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
_a = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=lowercase , max_length=lowercase , early_stopping=lowercase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
lowercase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , lowercase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=lowercase , )
logger.info("Model exported to {}".format(lowercase ) )
_a = remove_dup_initializers(os.path.abspath(lowercase ) )
logger.info("Deduplicated and optimized model written to {}".format(lowercase ) )
_a = onnxruntime.InferenceSession(lowercase )
_a = ort_sess.run(
lowercase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(lowercase ),
"max_length": np.array(lowercase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def _lowerCamelCase ( ) -> Any:
_a = parse_args()
_a = 5
_a = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_a = torch.device(args.device )
_a , _a = load_model_tokenizer(args.model_name_or_path , lowercase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(lowercase )
if args.max_length:
_a = args.max_length
if args.num_beams:
_a = args.num_beams
if args.output_file_path:
_a = args.output_file_path
else:
_a = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(lowercase , lowercase , lowercase , lowercase , lowercase )
if __name__ == "__main__":
main()
| 346 | 1 |
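Putting the flags above together, an export run looks like this; the script filename is an assumption, and facebook/bart-base is the only checkpoint wired into the model and tokenizer dicts:

python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
    --device cpu --max_length 5 --num_beams 4 --output_file_path BART.onnx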
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 346 |
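Because the launcher patches sys.argv and hands the target script's _mp_fn to xmp.spawn, any example script exposing an _mp_fn entry point can be fanned out across TPU cores; the example script name below is illustrative:

python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train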
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def _lowerCamelCase ( lowercase : str ) -> Optional[int]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase )
def _lowerCamelCase ( lowercase : Dict ) -> str:
from transformers.testing_utils import pytest_terminal_summary_main
_a = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(lowercase , id=lowercase )
| 346 | 1 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
lowerCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
lowerCAmelCase_ : Dict = {
'abeja/gpt-neox-japanese-2.7b': 20_48,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class __SCREAMING_SNAKE_CASE (PreTrainedTokenizer ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =['input_ids', 'attention_mask']
def __init__( self : Dict , __a : Optional[int] , __a : Union[str, Any] , __a : List[Any]="<|endoftext|>" , __a : List[str]="<|endoftext|>" , __a : Optional[int]="<|startoftext|>" , __a : Union[str, Any]="<|endoftext|>" , __a : Tuple=False , **__a : Any , ):
super().__init__(
unk_token=__a , pad_token=__a , bos_token=__a , eos_token=__a , do_clean_text=__a , **__a , )
if not os.path.isfile(__a ):
raise ValueError(
f'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(__a ):
raise ValueError(
                f'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
_a = do_clean_text
_a , _a , _a , _a = load_vocab_and_emoji(__a , __a )
_a = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCamelCase__ ( self : str ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def UpperCamelCase__ ( self : Tuple ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : Union[str, Any] , __a : List[Any] ):
return self.subword_tokenizer.tokenize(__a , clean=self.do_clean_text )
def UpperCamelCase__ ( self : int , __a : List[Any] ):
return self.vocab.get(__a , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self : Any , __a : Union[str, Any] ):
return self.subword_tokenizer.convert_id_to_token(__a )
def UpperCamelCase__ ( self : str , __a : Optional[int] ):
_a = "".join(__a ).strip()
return out_string
def UpperCamelCase__ ( self : Any , __a : "Conversation" ):
_a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] )
if len(__a ) > self.model_max_length:
_a = input_ids[-self.model_max_length :]
return input_ids
def UpperCamelCase__ ( self : Any , __a : str , __a : Optional[str] = None ):
_a = 0
if os.path.isdir(__a ):
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
_a = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
_a = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(__a , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
" Please check that the vocabulary is not corrupted!" )
_a = token_index
writer.write(",".join(__a ) + "\n" )
index += 1
with open(__a , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , __a )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object ):
"""simple docstring"""
def __init__( self : str , __a : str , __a : Any , __a : Optional[int] ):
_a = vocab # same as swe
_a = ids_to_tokens # same as bpe
_a = emoji
_a = np.max([len(__a ) for w in self.vocab.keys()] )
_a = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
_a = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
_a = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
_a = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
_a = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
_a = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
_a = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
_a = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
_a = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : Tuple ):
return len(self.ids_to_tokens )
def UpperCamelCase__ ( self : str , __a : int ):
_a = self.content_repattera.sub("<URL>" , __a )
_a = self.content_repattera.sub("<EMAIL>" , __a )
_a = self.content_repattera.sub("<TEL>" , __a )
_a = self.content_repattera.sub("<DATE>" , __a )
_a = self.content_repattera.sub("<DATE>" , __a )
_a = self.content_repattera.sub("<PRICE>" , __a )
_a = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_a = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def UpperCamelCase__ ( self : Optional[Any] , __a : Optional[Any] , __a : int=False ):
_a = text.replace(" " , "<SP>" )
_a = text.replace(" " , "<SP>" )
_a = text.replace("\r\n" , "<BR>" )
_a = text.replace("\n" , "<BR>" )
_a = text.replace("\r" , "<BR>" )
_a = text.replace("\t" , "<TAB>" )
_a = text.replace("—" , "ー" )
_a = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
_a = text.replace(__a , __a )
if clean:
_a = self.clean_text(__a )
def check_simbol(__a : Any ):
_a = x.encode()
if len(__a ) == 1 and len(__a ) == 2:
_a = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2_A1 and c <= 0XC2_BF)
or (c >= 0XC7_80 and c <= 0XC7_83)
or (c >= 0XCA_B9 and c <= 0XCB_BF)
or (c >= 0XCC_80 and c <= 0XCD_A2)
):
return True
return False
def checkuae(__a : List[Any] ):
_a = x.encode()
if len(__a ) == 1 and len(__a ) == 3:
_a = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
return True
return False
_a = 0
_a = []
while pos < len(__a ):
_a = min(len(__a ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
_a = [] # (token_id, token, pos)
for e in range(__a , __a , -1 ):
_a = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__a ) > 2:
_a = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__a ) > 0:
# the smallest token_id is adopted
_a , _a , _a = sorted(__a , key=lambda __a : x[0] )[0]
result.append(__a )
_a = e
else:
_a = pos + 1
_a = text[pos:end]
if check_simbol(__a ):
result.append("<KIGOU>" )
elif checkuae(__a ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
_a = end
return result
    def UpperCamelCase__ ( self , index : int , breakline : str = "\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word] )
            elif word == "<SP>":
                words.append(" " )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append("\t" )
            elif word == "<BLOCK>":
                words.append("▀" )
            elif word == "<KIGOU>":
                words.append("ǀ" )
            elif word == "<U2000U2BFF>":
                words.append("‖" )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
        text = "".join(words )
        return text
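# A minimal, self-contained sketch of the segmentation loop above, with a toy
# vocabulary and maxlen (illustrative assumptions, not the tokenizer's real
# data): all dictionary hits starting at `pos` are collected and the candidate
# with the smallest token id is adopted, mirroring the policy in the method above.
def greedy_smallest_id_split(text, vocab, maxlen=4):
    pos, result = 0, []
    while pos < len(text):
        candidates = []  # (token_id, surface, end_pos)
        for end in range(min(len(text), pos + maxlen), pos, -1):
            piece = text[pos:end]
            if piece in vocab:
                candidates.append((vocab[piece], piece, end))
        if candidates:
            _, piece, end = sorted(candidates, key=lambda c: c[0])[0]
            result.append(piece)
            pos = end
        else:
            result.append(text[pos])  # no dictionary hit: emit the raw character
            pos += 1
    return result

# e.g. greedy_smallest_id_split("banana", {"ban": 1, "ana": 2, "na": 3, "a": 4, "b": 5})
# -> ['ban', 'ana']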
| 346 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE (ModelMixin , ConfigMixin , ModuleUtilsMixin ):
"""simple docstring"""
    @register_to_config
    def __init__( self , max_length : int , vocab_size : int , d_model : int , dropout_rate : float , num_layers : int , num_heads : int , d_kv : int , d_ff : int , feed_forward_proj : str , is_decoder : bool = False , ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = T5Config(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )
        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def UpperCamelCase__ ( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask: padded positions become a large negative bias
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
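# Sketch of what ModuleUtilsMixin.get_extended_attention_mask contributes to the
# forward pass above (an illustration under the usual convention, not the mixin's
# exact code): a (batch, seq) padding mask is broadcast to (batch, 1, 1, seq) and
# inverted into a large negative additive bias for the attention logits.
def _extended_attention_mask_sketch(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    inverted = 1.0 - mask[:, None, None, :].to(dtype)  # 1 where padded, 0 where attended
    return inverted * torch.finfo(dtype).min           # ~ -inf bias on padded keys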
| 346 | 1 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ['audio_values', 'audio_mask']
    def __init__( self , spectrogram_length : int = 2048 , num_channels : int = 1 , patch_size : List[int] = [16, 16] , feature_size : int = 128 , sampling_rate : int = 44100 , hop_length_to_sampling_rate : int = 86 , n_fft : int = 2048 , padding_value : float = 0.0 , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , ).T
    def _np_extract_fbank_features( self , waveform : np.array ):
        # Power spectrogram -> log-mel in dB, shifted and rescaled to roughly [-1, 1].
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = True , sampling_rate : Optional[int] = None , resample : bool = False , mask_audio : bool = False , **kwargs , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
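# Hypothetical usage of the extractor above (values are illustrative): feed a
# mono float waveform and get back padded log-mel patches plus a patch mask.
#
#     extractor = __SCREAMING_SNAKE_CASE(sampling_rate=44100)
#     waveform = np.zeros(44100, dtype=np.float32)          # one second of silence
#     batch = extractor(waveform, sampling_rate=44100, return_tensors="np")
#     batch["audio_values"].shape                           # (1, 1, max_time_len, 128)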
| 346 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> int:
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ) -> ModelCheckpoint:
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this'
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ) -> EarlyStopping:
    return EarlyStopping(
        monitor=f'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class __SCREAMING_SNAKE_CASE (pl.Callback ):
"""simple docstring"""
    def on_batch_end( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        lrs = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self , trainer : pl.Trainer , pl_module : pl.LightningModule , type_path : str , save_generations : bool = True ):
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
@rank_zero_only
    def on_train_start( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
    def on_test_end( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
@rank_zero_only
    def on_validation_end( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
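# Sketch of wiring the helpers above into a Lightning Trainer; "outputs",
# the metric name and the patience value are illustrative assumptions:
#
#     trainer = pl.Trainer(
#         callbacks=[
#             get_checkpoint_callback("outputs", "rouge2"),
#             get_early_stopping_callback("rouge2", patience=3),
#             __SCREAMING_SNAKE_CASE(),
#         ],
#     )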
| 346 | 1 |
'''simple docstring'''
import random
class Onepad :
"""simple docstring"""
@staticmethod
    def encrypt( text : str ):
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 300 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
@staticmethod
    def decrypt( cipher : list[int] , key : list[int] ):
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c , k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
| 346 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil( images ):
    # Map a torch tensor in [-1, 1] with shape (B, C, H, W) to a list of PIL images.
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil( images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
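# Round-trip sketch for the helpers above (the tensor is a made-up example):
#
#     import torch
#     images = torch.rand(1, 3, 64, 64) * 2 - 1   # diffusers convention: values in [-1, 1]
#     pil_images = pt_to_pil(images)              # -> list with one 64x64 RGB PIL image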
| 346 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_84,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_28,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing the dataset.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, 'rb') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, 'wb') as f:
            f.write(engine.serialize())
def model_infer( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    input_ids = np.asarray(inputs["input_ids"] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["attention_mask"] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["token_type_ids"] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
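# Note on the pattern above: the host-to-device copies, the kernel launch and
# the device-to-host copies are all enqueued on one CUDA stream, so they run
# in submission order; stream.synchronize() is the only blocking call, which
# is why the wall-clock delta brackets the full enqueue + execute round trip.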
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features( examples ):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="max_length" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i] )
        ]
    return tokenized_examples
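# Toy illustration of the stride/overflow behaviour relied on above (numbers
# are made up): with max_length=8 and doc_stride=2, one long context becomes
# overlapping windows, and overflow_to_sample_mapping records the source row:
#
#     windows:                     [t0..t7], [t6..t13], [t12..t19]
#     overflow_to_sample_mapping:  [0, 0, 0]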
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function( examples , features , predictions , stage="eval" ):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
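# Note: for SQuAD v2 the metric expects a per-prediction no-answer probability;
# the constant 0.0 above effectively says "always answer here", leaving the
# null-vs-span decision to null_score_diff_threshold inside
# postprocess_qa_predictions (an interpretation, not something this script asserts).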
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes( binding ):
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (pinned host memory for fast async copies)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 346 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def dataset() -> datasets.Dataset:
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string" ) ),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string" ),
                    "answer_start": datasets.Value("int32" ),
                } ),
            "id": datasets.Value("int64" ),
        } )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * n,
            "id": list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope="session" )
def arrow_file( tmp_path_factory , dataset ) -> str:
    filename = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
    dataset.map(cache_file_name=filename )
    return filename
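# Hypothetical example of how a test consumes these fixtures: pytest injects
# them by parameter name, and scope="session" builds each artifact only once
# per test session, e.g.
#
#     def test_dataset_has_ten_rows(dataset):
#         assert dataset.num_rows == 10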
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def text_file( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.txt"
    data = FILE_CONTENT
    with open(filename , "w" ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope="session" )
def bz2_file( tmp_path_factory ):
    import bz2
    path = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
    data = bytes(FILE_CONTENT , "utf-8" )
    with bz2.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def gz_file( tmp_path_factory ):
    import gzip
    path = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
    data = bytes(FILE_CONTENT , "utf-8" )
    with gzip.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def lz4_file( tmp_path_factory ):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        path = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
        data = bytes(FILE_CONTENT , "utf-8" )
        with lz4.frame.open(path , "wb" ) as f:
            f.write(data )
        return path
@pytest.fixture(scope="session" )
def seven_zip_file( tmp_path_factory , text_file ):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        path = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
        with py7zr.SevenZipFile(path , "w" ) as archive:
            archive.write(text_file , arcname=os.path.basename(text_file ) )
        return path
@pytest.fixture(scope="session" )
def tar_file( tmp_path_factory , text_file ):
    import tarfile
    path = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
    with tarfile.TarFile(path , "w" ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope="session" )
def xz_file( tmp_path_factory ):
    import lzma
    path = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
    data = bytes(FILE_CONTENT , "utf-8" )
    with lzma.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def zip_file( tmp_path_factory , text_file ):
    import zipfile
    path = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope="session" )
def zstd_file( tmp_path_factory ):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
        path = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
        data = bytes(FILE_CONTENT , "utf-8" )
        with zstd.open(path , "wb" ) as f:
            f.write(data )
        return path
@pytest.fixture(scope="session" )
def xml_file( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.xml"
    data = textwrap.dedent(
        "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
    with open(filename , "w" ) as f:
        f.write(data )
    return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def dataset_dict():
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def arrow_path( tmp_path_factory ) -> str:
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS )
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
    dataset.map(cache_file_name=path )
    return path
@pytest.fixture(scope="session" )
def sqlite_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
        con.commit()
    return path
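# Read-back sketch for the sqlite fixture above (illustrative only):
#
#     def test_sqlite_fixture(sqlite_path):
#         with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
#             assert con.execute("SELECT COUNT(*) FROM dataset").fetchone() == (4,)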
@pytest.fixture(scope="session" )
def csv_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
    with open(path , "w" , newline="" ) as f:
        writer = csv.DictWriter(f , fieldnames=["col_1", "col_2", "col_3"] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope="session" )
def csv2_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
    with open(path , "w" , newline="" ) as f:
        writer = csv.DictWriter(f , fieldnames=["col_1", "col_2", "col_3"] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope="session" )
def bz2_csv_path( csv_path , tmp_path_factory ):
    import bz2
    path = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
    with open(csv_path , "rb" ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def zip_csv_path( csv_path , csv2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path ) )
        f.write(csv2_path , arcname=os.path.basename(csv2_path ) )
    return path
@pytest.fixture(scope="session" )
def zip_uppercase_csv_path( csv_path , csv2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
        f.write(csv2_path , arcname=os.path.basename(csv2_path.replace(".csv" , ".CSV" ) ) )
    return path
@pytest.fixture(scope="session" )
def zip_csv_with_dir_path( csv_path , csv2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(csv_path , arcname=os.path.join("main_dir" , os.path.basename(csv_path ) ) )
        f.write(csv2_path , arcname=os.path.join("main_dir" , os.path.basename(csv2_path ) ) )
    return path
@pytest.fixture(scope="session" )
def parquet_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        } )
    with open(path , "wb" ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
    return path
@pytest.fixture(scope="session" )
def json_list_of_dicts_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
    data = {"data": DATA}
    with open(path , "w" ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope="session" )
def json_dict_of_lists_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path , "w" ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope="session" )
def jsonl_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
    with open(path , "w" ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def jsonl2_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
    with open(path , "w" ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def jsonl_312_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
    with open(path , "w" ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def jsonl_str_path( tmp_path_factory ) -> str:
    path = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
    with open(path , "w" ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def text_gz_path( tmp_path_factory , text_path ):
    import gzip
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
    with open(text_path , "rb" ) as orig_file:
        with gzip.open(path , "wb" ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope="session" )
def jsonl_gz_path( tmp_path_factory , jsonl_path ):
    import gzip
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
    with open(jsonl_path , "rb" ) as orig_file:
        with gzip.open(path , "wb" ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope="session" )
def zip_jsonl_path( jsonl_path , jsonl2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.write(jsonl2_path , arcname=os.path.basename(jsonl2_path ) )
    return path
@pytest.fixture(scope="session" )
def zip_nested_jsonl_path( zip_jsonl_path , jsonl_path , jsonl2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(zip_jsonl_path , arcname=os.path.join("nested" , os.path.basename(zip_jsonl_path ) ) )
    return path
@pytest.fixture(scope="session" )
def zip_jsonl_with_dir_path( jsonl_path , jsonl2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(jsonl_path , arcname=os.path.join("main_dir" , os.path.basename(jsonl_path ) ) )
        f.write(jsonl2_path , arcname=os.path.join("main_dir" , os.path.basename(jsonl2_path ) ) )
    return path
@pytest.fixture(scope="session" )
def tar_jsonl_path( jsonl_path , jsonl2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
    with tarfile.TarFile(path , "w" ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonl2_path , arcname=os.path.basename(jsonl2_path ) )
    return path
@pytest.fixture(scope="session" )
def tar_nested_jsonl_path( tar_jsonl_path , jsonl_path , jsonl2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path , "w" ) as f:
        f.add(tar_jsonl_path , arcname=os.path.join("nested" , os.path.basename(tar_jsonl_path ) ) )
    return path
@pytest.fixture(scope="session" )
def text_path( tmp_path_factory ) -> str:
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
    with open(path , "w" ) as f:
        for item in data:
            f.write(item + "\n" )
    return path
@pytest.fixture(scope="session" )
def text2_path( tmp_path_factory ) -> str:
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
    with open(path , "w" ) as f:
        for item in data:
            f.write(item + "\n" )
    return path
@pytest.fixture(scope="session" )
def abc_file( tmp_path_factory ):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data" ) / "dataset.abc"
    with open(path , "w" ) as f:
        for item in data:
            f.write(item + "\n" )
    return path
@pytest.fixture(scope="session" )
def zip_text_path( text_path , text2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(text2_path , arcname=os.path.basename(text2_path ) )
    return path
@pytest.fixture(scope="session" )
def zip_text_with_dir_path( text_path , text2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(text_path , arcname=os.path.join("main_dir" , os.path.basename(text_path ) ) )
        f.write(text2_path , arcname=os.path.join("main_dir" , os.path.basename(text2_path ) ) )
    return path
@pytest.fixture(scope="session" )
def zip_unsupported_ext_path( text_path , text2_path , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(text_path , arcname=os.path.basename("unsupported.ext" ) )
        f.write(text2_path , arcname=os.path.basename("unsupported_2.ext" ) )
    return path
@pytest.fixture(scope="session" )
def text_path_with_unicode_new_lines( tmp_path_factory ):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
    path = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(text )
    return path
@pytest.fixture(scope="session" )
def image_file():
    return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def audio_file():
    return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def zip_image_path( image_file , tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace(".jpg" , "2.jpg" ) )
    return path
@pytest.fixture(scope="session" )
def data_dir_with_hidden_files( tmp_path_factory ):
    data_dir = tmp_path_factory.mktemp("data_dir" )
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
        f.write("foo\n" * 10 )
    with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
        f.write("bar\n" * 10 )
    # hidden file
    with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
        f.write("bar\n" * 10 )
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
        f.write("foo\n" * 10 )
    with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
        f.write("bar\n" * 10 )
    return data_dir
| 346 | 1 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> Optional[int]:
_a = 10
_a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(lowercase ) ),
} , features=lowercase , )
return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
lowerCAmelCase_ : Union[str, Any] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.txt"
_a = FILE_CONTENT
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> str:
import bza
_a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_a = bytes(lowercase , "utf-8" )
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_a = bytes(lowercase , "utf-8" )
with gzip.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Union[str, Any]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_a = bytes(lowercase , "utf-8" )
with lza.frame.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(lowercase , "w" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> Dict:
import tarfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(lowercase , "w" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any ) -> Union[str, Any]:
import lzma
_a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_a = bytes(lowercase , "utf-8" )
with lzma.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int , lowercase : Any ) -> Union[str, Any]:
import zipfile
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_a = bytes(lowercase , "utf-8" )
with zstd.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] ) -> Union[str, Any]:
_a = tmp_path_factory.mktemp("data" ) / "file.xml"
_a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(lowercase , "w" ) as f:
f.write(lowercase )
return filename
lowerCAmelCase_ : Optional[int] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase_ : Dict = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase_ : Dict = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase_ : List[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = datasets.Dataset.from_dict(lowercase )
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict ) -> Dict:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(lowercase ) ) as con:
_a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(lowercase , "w" , newline="" ) as f:
_a = csv.DictWriter(lowercase , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> int:
import bza
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(lowercase , "rb" ) as f:
_a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase , "wb" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Any , lowercase : Any ) -> List[str]:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Dict , lowercase : Any , lowercase : List[Any] ) -> Dict:
_a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ) -> int:
_a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(lowercase , "w" ) as f:
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("main_dir" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[Any] ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(lowercase , "wb" ) as f:
_a = pq.ParquetWriter(lowercase , schema=lowercase )
_a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> Union[str, Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_a = {"data": DATA_DICT_OF_LISTS}
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[int] ) -> str:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : int ) -> List[str]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[Any]:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : str ) -> int:
_a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(lowercase , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Dict ) -> Tuple:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] ) -> List[Any]:
import gzip
_a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(lowercase , "rb" ) as orig_file:
with gzip.open(lowercase , "wb" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path
@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
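# A minimal, hedged sketch (not part of the original fixtures): lists the member
# names of the archives built above, e.g. ["dataset.jsonl", "dataset2.jsonl"] for
# the flat tar and ["nested/dataset.jsonl.tar"] for the nested one below.
def _list_tar_members(path):
    with tarfile.open(path, "r") as archive:
        return archive.getnames()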
@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def abc_file_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path
@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")
@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")
@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
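# A minimal, hedged sketch (not part of the original fixtures): the kind of dot-file
# filtering this fixture is meant to exercise; only subdir/train.txt and
# subdir/test.txt should survive.
def _visible_files(data_dir):
    visible = []
    for root, dirs, files in os.walk(data_dir):
        # prune hidden directories in place so os.walk does not descend into them
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        visible.extend(os.path.join(root, f) for f in files if not f.startswith("."))
    return sorted(visible)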
| 346 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
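# A minimal usage sketch (not part of the class above). The checkpoint name and the
# image path are illustrative assumptions, not requirements of the implementation:
#
#     from transformers import LayoutXLMProcessor
#     from PIL import Image
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#     # expected keys include "input_ids", "attention_mask", "bbox" and "image"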
| 346 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 346 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
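# A minimal usage sketch (not part of the module above). The checkpoint is the one
# referenced in PRETRAINED_VOCAB_FILES_MAP; loading it requires network access:
#
#     from transformers import Speech2TextTokenizer
#
#     tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     ids = tokenizer("hello world").input_ids
#     print(tokenizer.decode(ids, skip_special_tokens=True))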
| 346 | 1 |