| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
"""Miller-Rabin probabilistic primality test."""
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1_000):
    """Return True if n is (probably) prime, using `prec` random witnesses."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int (was `d /= 2`, which yields a float)
        exp += 1
    count = 0
    while count < prec:
        b = bin_exp_mod(random.randint(2, n - 1), d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 51 |
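The snippet above implements the Miller-Rabin probabilistic primality test. As a quick sanity check, here is a self-contained sketch that swaps the package-local `bin_exp_mod` for Python's built-in three-argument `pow` (the only assumption, since the relative import is unavailable outside the original package):

```python
import random

def miller_rabin(n: int, prec: int = 20) -> bool:
    """Probabilistic primality test; pow(a, d, n) stands in for bin_exp_mod."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:  # factor n - 1 as d * 2**exp with d odd
        d //= 2
        exp += 1
    for _ in range(prec):
        b = pow(random.randint(2, n - 1), d, n)
        if b == 1:
            continue
        for _ in range(exp):
            if b == n - 1:
                break  # this witness is consistent with n being prime
            b = b * b % n
        else:
            return False  # composite: no square ever reached n - 1
    return True

print([i for i in range(2, 30) if miller_rabin(i)])  # 2, 3, 5, 7, 11, ...
```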
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase_ : Dict = None
UpperCamelCase_ : int = logging.get_logger(__name__)
UpperCamelCase_ : str = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase_ : List[Any] = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ : Optional[Any] = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
UpperCamelCase_ : str = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = ["input_ids", "attention_mask"]
snake_case = MBartTokenizer
snake_case = []
snake_case = []
def __init__( self : List[str] , _snake_case : Tuple=None , _snake_case : int=None , _snake_case : List[Any]="<s>" , _snake_case : Tuple="</s>" , _snake_case : str="</s>" , _snake_case : List[Any]="<s>" , _snake_case : Dict="<unk>" , _snake_case : str="<pad>" , _snake_case : Any="<mask>" , _snake_case : int=None , _snake_case : Optional[int]=None , _snake_case : Any=None , **_snake_case : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
vocab_file=_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , **_snake_case , )
A_ = vocab_file
A_ = False if not self.vocab_file else True
A_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
A_ = {
lang_code: self.convert_tokens_to_ids(_snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ = src_lang if src_lang is not None else "en_XX"
A_ = self.convert_tokens_to_ids(self._src_lang )
A_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase__ ( self : Dict ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self : Tuple , _snake_case : str ) -> None:
"""simple docstring"""
A_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__ ( self : Optional[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : List[Any] , _snake_case : str , _snake_case : str , _snake_case : Optional[str] , _snake_case : Optional[str] , **_snake_case : Optional[int] ) -> str:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
A_ = src_lang
A_ = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case )
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = tgt_lang_id
return inputs
def lowerCamelCase__ ( self : Dict , _snake_case : List[str] , _snake_case : str = "en_XX" , _snake_case : Optional[List[str]] = None , _snake_case : str = "ro_RO" , **_snake_case : str , ) -> BatchEncoding:
"""simple docstring"""
A_ = src_lang
A_ = tgt_lang
return super().prepare_seq2seq_batch(_snake_case , _snake_case , **_snake_case )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self : Tuple , _snake_case : List[str] ) -> None:
"""simple docstring"""
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase__ ( self : List[str] , _snake_case : str ) -> None:
"""simple docstring"""
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : str , _snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
A_ = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
| 115 | 0 |
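The MBart fast tokenizer above appends `[eos, src_lang_code]` to the source sequence (see `set_src_lang_special_tokens`). A minimal usage sketch, assuming Hub access to the `facebook/mbart-large-en-ro` checkpoint named in the snippet's vocab map:

```python
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
# Source ids end with [..., </s>, en_XX]; the language code is a suffix token.
print(tok.convert_ids_to_tokens(batch["input_ids"][0])[-2:])  # ['</s>', 'en_XX']
```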
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_lowercase : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def snake_case__ ( __lowerCamelCase : Dict ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] ={}
state_dict.pop('''pixel_mean''' , lowerCamelCase__ )
state_dict.pop('''pixel_std''' , lowerCamelCase__ )
lowerCamelCase__ : Union[str, Any] =R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCamelCase__ : str =key.replace(lowerCamelCase__ , lowerCamelCase__ )
if re.match(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase__ : Tuple =int(re.match(lowerCamelCase__ , lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
lowerCamelCase__ : List[str] =key.replace('''layers.0''' , '''proj_in''' )
elif layer_nb == 1:
lowerCamelCase__ : int =key.replace('''layers.1''' , '''layers.0''' )
elif layer_nb == 2:
lowerCamelCase__ : List[str] =key.replace('''layers.2''' , '''proj_out''' )
lowerCamelCase__ : Optional[Any] =value
lowerCamelCase__ : str =model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str="ybelkada/segment-anything" ):
"""simple docstring"""
lowerCamelCase__ : Dict =hf_hub_download(lowerCamelCase__ , f'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
lowerCamelCase__ : Optional[int] =SamConfig()
elif "sam_vit_l" in model_name:
lowerCamelCase__ : Dict =SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowerCamelCase__ : Optional[int] =SamConfig(
vision_config=lowerCamelCase__ , )
elif "sam_vit_h" in model_name:
lowerCamelCase__ : List[str] =SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowerCamelCase__ : Any =SamConfig(
vision_config=lowerCamelCase__ , )
lowerCamelCase__ : Union[str, Any] =torch.load(lowerCamelCase__ , map_location='''cpu''' )
lowerCamelCase__ : int =replace_keys(lowerCamelCase__ )
lowerCamelCase__ : int =SamImageProcessor()
lowerCamelCase__ : Any =SamProcessor(image_processor=lowerCamelCase__ )
lowerCamelCase__ : str =SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
lowerCamelCase__ : List[str] =hf_model.to('''cuda''' )
lowerCamelCase__ : List[Any] ="https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
lowerCamelCase__ : Tuple =Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert('''RGB''' )
lowerCamelCase__ : List[str] =[[[400, 650]]]
lowerCamelCase__ : Any =[[1]]
lowerCamelCase__ : Any =processor(images=np.array(lowerCamelCase__ ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
lowerCamelCase__ : str =hf_model(**lowerCamelCase__ )
lowerCamelCase__ : int =output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
lowerCamelCase__ : Tuple =processor(
images=np.array(lowerCamelCase__ ) , input_points=lowerCamelCase__ , input_labels=lowerCamelCase__ , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] =hf_model(**lowerCamelCase__ )
lowerCamelCase__ : Optional[Any] =output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
lowerCamelCase__ : Any =((75, 275, 1725, 850),)
lowerCamelCase__ : Any =processor(images=np.array(lowerCamelCase__ ) , input_boxes=lowerCamelCase__ , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
lowerCamelCase__ : Dict =hf_model(**lowerCamelCase__ )
lowerCamelCase__ : Tuple =output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
lowerCamelCase__ : str =[[[400, 650], [800, 650]]]
lowerCamelCase__ : Tuple =[[1, 1]]
lowerCamelCase__ : str =processor(
images=np.array(lowerCamelCase__ ) , input_points=lowerCamelCase__ , input_labels=lowerCamelCase__ , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
lowerCamelCase__ : List[Any] =hf_model(**lowerCamelCase__ )
lowerCamelCase__ : str =output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
_lowercase : List[str] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
_lowercase : Dict = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 710 |
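A hedged sketch of driving the converter above programmatically rather than through argparse. The argument order mirrors the `__main__` block; `sam_vit_b_01ec64` comes from the script's `choices` list, and a CUDA device is required because the script moves tensors to `"cuda"`:

```python
# Positional arguments: model_name, pytorch_dump_folder_path, push_to_hub, model_hub_id.
convert_sam_checkpoint("sam_vit_b_01ec64", "./sam-vit-base", False, "ybelkada/segment-anything")
```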
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]:
lowerCamelCase__ : List[str] =parent
lowerCamelCase__ : Tuple =batch_size
lowerCamelCase__ : str =image_size
lowerCamelCase__ : Any =num_channels
lowerCamelCase__ : Tuple =num_stages
lowerCamelCase__ : List[str] =hidden_sizes
lowerCamelCase__ : Any =depths
lowerCamelCase__ : Union[str, Any] =is_training
lowerCamelCase__ : Tuple =use_labels
lowerCamelCase__ : int =intermediate_size
lowerCamelCase__ : Optional[int] =hidden_act
lowerCamelCase__ : Dict =type_sequence_label_size
lowerCamelCase__ : Tuple =initializer_range
lowerCamelCase__ : Any =out_features
lowerCamelCase__ : Tuple =num_labels
lowerCamelCase__ : Optional[int] =scope
lowerCamelCase__ : Optional[int] =num_stages
def snake_case ( self : str )-> Optional[int]:
lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple =None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int =self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] )-> Any:
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def snake_case ( self : Union[str, Any] )-> Any:
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, )
def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple:
lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def snake_case ( self : Any )-> Tuple:
lowerCamelCase__ : Dict =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any =config_and_inputs
lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
def snake_case ( self : Optional[int] )-> Optional[int]:
lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def snake_case ( self : Optional[int] )-> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] )-> Dict:
return
def snake_case ( self : Optional[int] )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
lowerCamelCase__ : Tuple =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
lowerCamelCase__ : List[Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def snake_case ( self : Any )-> Union[str, Any]:
lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def snake_case ( self : Optional[Any] )-> List[Any]:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def snake_case ( self : Any )-> List[str]:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self : int )-> Any:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self : Dict )-> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def snake_case ( self : List[Any] )-> List[str]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Tuple )-> str:
pass
def snake_case ( self : Optional[int] )-> List[str]:
def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : List[str] =self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Any )-> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def snake_case ( self : Any )-> str:
pass
@slow
def snake_case ( self : int )-> Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
lowerCamelCase__ : List[str] =Image.open(__lowerCamelCase ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : str )-> Union[str, Any]:
lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
lowerCamelCase__ : List[Any] =prepare_img()
lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowerCamelCase__ : Dict =torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
lowerCamelCase__ : Dict =prepare_img()
lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : Any =model(**lowerCamelCase )
lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowerCamelCase__ : List[str] =torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 625 | 0 |
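The slow integration tests above reduce to a short inference recipe. A sketch under the assumption that the `openmmlab/upernet-convnext-tiny` checkpoint and the ADE20k fixture are reachable on the Hub:

```python
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

path = hf_hub_download(
    repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
)
image = Image.open(path).convert("RGB")

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits        # (1, num_labels, 512, 512)
pred = logits.argmax(dim=1)                # per-pixel class indices
```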
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A : List[str] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = ["ConditionalDetrFeatureExtractor"]
__A : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 |
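`_LazyModule` defers the heavy submodule imports until an attribute is first accessed. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); these names are illustrative, not the actual transformers internals:

```python
import importlib

_import_structure = {"configuration_conditional_detr": ["ConditionalDetrConfig"]}
_name_to_module = {n: mod for mod, names in _import_structure.items() for n in names}

def __getattr__(name):  # called only when `name` is not found normally
    if name in _name_to_module:
        module = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```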
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Any = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 653 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def UpperCAmelCase ( A : Any ):
'''simple docstring'''
if "resnet-50" in model_name:
_UpperCAmelCase = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
_UpperCAmelCase = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
raise ValueError('Model name should include either resnet50 or resnet101' )
_UpperCAmelCase = DetrConfig(use_timm_backbone=_lowerCAmelCase , backbone_config=_lowerCAmelCase )
# set label attributes
_UpperCAmelCase = 'panoptic' in model_name
if is_panoptic:
_UpperCAmelCase = 250
else:
_UpperCAmelCase = 91
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = 'coco-detection-id2label.json'
_UpperCAmelCase = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(k): v for k, v in id2label.items()}
_UpperCAmelCase = id2label
_UpperCAmelCase = {v: k for k, v in id2label.items()}
return config, is_panoptic
def UpperCAmelCase ( A : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def UpperCAmelCase ( A : Optional[Any] , A : List[str] , A : int ):
'''simple docstring'''
_UpperCAmelCase = state_dict.pop(_lowerCAmelCase )
_UpperCAmelCase = val
def UpperCAmelCase ( A : Any , A : Union[str, Any]=False ):
'''simple docstring'''
_UpperCAmelCase = ''
if is_panoptic:
_UpperCAmelCase = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
_UpperCAmelCase = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:256, :]
_UpperCAmelCase = in_proj_bias[:256]
_UpperCAmelCase = in_proj_weight[256:512, :]
_UpperCAmelCase = in_proj_bias[256:512]
_UpperCAmelCase = in_proj_weight[-256:, :]
_UpperCAmelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_UpperCAmelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_UpperCAmelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:256, :]
_UpperCAmelCase = in_proj_bias[:256]
_UpperCAmelCase = in_proj_weight[256:512, :]
_UpperCAmelCase = in_proj_bias[256:512]
_UpperCAmelCase = in_proj_weight[-256:, :]
_UpperCAmelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_UpperCAmelCase = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
_UpperCAmelCase = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_UpperCAmelCase = in_proj_weight_cross_attn[:256, :]
_UpperCAmelCase = in_proj_bias_cross_attn[:256]
_UpperCAmelCase = in_proj_weight_cross_attn[256:512, :]
_UpperCAmelCase = in_proj_bias_cross_attn[256:512]
_UpperCAmelCase = in_proj_weight_cross_attn[-256:, :]
_UpperCAmelCase = in_proj_bias_cross_attn[-256:]
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( A : str , A : Dict=None , A : Optional[int]=False ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = get_detr_config(_lowerCAmelCase )
# load original model from torch hub
_UpperCAmelCase = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(f'Converting model {model_name}...' )
_UpperCAmelCase = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=_lowerCAmelCase ).eval()
_UpperCAmelCase = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_lowerCAmelCase ):
if is_panoptic:
_UpperCAmelCase = 'detr.' + src
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCAmelCase , is_panoptic=_lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_UpperCAmelCase = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
_UpperCAmelCase = state_dict.pop(_lowerCAmelCase )
_UpperCAmelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_UpperCAmelCase = state_dict.pop(_lowerCAmelCase )
_UpperCAmelCase = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
_UpperCAmelCase = state_dict.pop(_lowerCAmelCase )
_UpperCAmelCase = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_UpperCAmelCase = state_dict.pop(_lowerCAmelCase )
_UpperCAmelCase = val
# finally, create HuggingFace model and load state dict
_UpperCAmelCase = DetrForSegmentation(_lowerCAmelCase ) if is_panoptic else DetrForObjectDetection(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# verify our conversion on an image
_UpperCAmelCase = 'coco_panoptic' if is_panoptic else 'coco_detection'
_UpperCAmelCase = DetrImageProcessor(format=_lowerCAmelCase )
_UpperCAmelCase = processor(images=prepare_img() , return_tensors='pt' )
_UpperCAmelCase = encoding['pixel_values']
_UpperCAmelCase = detr(_lowerCAmelCase )
_UpperCAmelCase = model(_lowerCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(f'nielsr/{model_name}' )
processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
lowercase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 700 |
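The trickiest step above is `read_in_q_k_v`: PyTorch's `nn.MultiheadAttention` stores one fused `(3*d, d)` `in_proj` matrix, while HF DETR expects separate query/key/value projections. A self-contained sketch of the split, with `d = 256` matching the slicing in the snippet:

```python
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)   # stand-in for the fused checkpoint tensor
in_proj_bias = torch.randn(3 * d)

q_w, k_w, v_w = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[-d:]
q_b, k_b, v_b = in_proj_bias[:d], in_proj_bias[d : 2 * d], in_proj_bias[-d:]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)
```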
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''beit'''
def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str:
super().__init__(**snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = use_mask_token
_UpperCAmelCase = use_absolute_position_embeddings
_UpperCAmelCase = use_relative_position_bias
_UpperCAmelCase = use_shared_relative_position_bias
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase = out_indices
_UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = semantic_loss_ignore_index
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 24 | 0 |
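A quick sketch exercising the configuration above; the ONNX subclass declares a single dynamically shaped `pixel_values` input and a validation tolerance of 1e-4. `BeitConfig` is the public name behind the obfuscated class:

```python
from transformers import BeitConfig

config = BeitConfig(image_size=224, patch_size=16, num_channels=3)
print(config.model_type)   # "beit"
print(config.hidden_size)  # 768 by default, per the __init__ signature above
```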
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __snake_case ( lowercase : Optional[int] ):
snake_case_ = [
"decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
def __snake_case ( lowercase : Optional[Any] ):
snake_case_ = emb.weight.shape
snake_case_ = nn.Linear(lowercase , lowercase , bias=lowercase )
snake_case_ = emb.weight.data
return lin_layer
def __snake_case ( lowercase : Optional[int] ):
snake_case_ = torch.load(lowercase , map_location="cpu" )
snake_case_ = Namespace(**checkpoint["cfg"]["model"] )
snake_case_ = checkpoint["model"]
remove_ignore_keys_(lowercase )
snake_case_ = state_dict["decoder.embed_tokens.weight"].shape[0]
snake_case_ = {key.replace("decoder" , "model" ): val for key, val in state_dict.items()}
snake_case_ = XGLMConfig(
vocab_size=lowercase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
snake_case_ = XGLMForCausalLM(lowercase )
snake_case_ = model.load_state_dict(lowercase , strict=lowercase )
print(lowercase )
snake_case_ = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase__ = parser.parse_args()
lowercase__ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 508 |
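The `make_linear_from_emb` helper above turns the embedding table into an output projection by sharing storage. A self-contained illustration with toy sizes:

```python
import torch
from torch import nn

emb = nn.Embedding(100, 32)                # (vocab_size, d_model)
vocab_size, d_model = emb.weight.shape
lin = nn.Linear(d_model, vocab_size, bias=False)
lin.weight.data = emb.weight.data          # tie weights: logits = h @ emb.weight.T
assert lin.weight.data_ptr() == emb.weight.data_ptr()
```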
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = '''EncodecFeatureExtractor'''
A = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = self.feature_extractor
lowerCAmelCase__ :Tuple = False
def snake_case_ ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True ):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=_lowerCAmelCase , language=_lowerCAmelCase , no_timestamps=_lowerCAmelCase )
def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCAmelCase , **_lowerCAmelCase )
lowerCAmelCase__ :Optional[Any] = kwargs.pop("audio" , _lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = kwargs.pop("sampling_rate" , _lowerCAmelCase )
lowerCAmelCase__ :Dict = kwargs.pop("text" , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
lowerCAmelCase__ :Optional[int] = args[0]
lowerCAmelCase__ :Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
lowerCAmelCase__ :Any = self.tokenizer(_lowerCAmelCase , **_lowerCAmelCase )
if audio is not None:
lowerCAmelCase__ :Tuple = self.feature_extractor(_lowerCAmelCase , *_lowerCAmelCase , sampling_rate=_lowerCAmelCase , **_lowerCAmelCase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase__ :List[str] = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
lowerCAmelCase__ :int = audio_inputs["padding_mask"]
return inputs
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = kwargs.pop("audio" , _lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = kwargs.pop("padding_mask" , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
lowerCAmelCase__ :int = args[0]
lowerCAmelCase__ :List[str] = args[1:]
if audio_values is not None:
return self._decode_audio(_lowerCAmelCase , padding_mask=_lowerCAmelCase )
else:
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = to_numpy(_lowerCAmelCase )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ :Optional[Any] = audio_values.shape
if padding_mask is None:
return list(_lowerCAmelCase )
lowerCAmelCase__ :List[str] = to_numpy(_lowerCAmelCase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase__ :str = seq_len - padding_mask.shape[-1]
lowerCAmelCase__ :Union[str, Any] = 1 - self.feature_extractor.padding_value
lowerCAmelCase__ :Optional[Any] = np.pad(_lowerCAmelCase , ((0, 0), (0, difference)) , "constant" , constant_values=_lowerCAmelCase )
lowerCAmelCase__ :Union[str, Any] = audio_values.tolist()
for i in range(_lowerCAmelCase ):
lowerCAmelCase__ :str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase__ :List[Any] = sliced_audio.reshape(_lowerCAmelCase , -1 )
return audio_values
| 145 | 0 |
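`_decode_audio` above trims generated audio back to each example's true length, after first padding the mask with the non-padding token. A small numeric sketch of that slicing, assuming the feature extractor's `padding_value` is 0.0:

```python
import numpy as np

padding_value = 0.0
audio = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (batch, channels, seq)
mask = np.array([[1, 1, 1, 1, 0, 0],   # example 0: only 4 real samples
                 [1, 1, 1, 1, 1, 1]])  # example 1: full length
trimmed = [a[m[None, :] != padding_value].reshape(1, -1) for a, m in zip(audio, mask)]
print([t.shape for t in trimmed])       # [(1, 4), (1, 6)]
```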
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__: Optional[int] = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: str = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
Swinv2ForImageClassification,
Swinv2ForMaskedImageModeling,
Swinv2Model,
Swinv2PreTrainedModel,
)
else:
import sys
A__: Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 221 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" ,[None, 4_00 * 2**20, 6_00 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size" ,["default", 0, 1_00 * 2**20, 9_00 * 2**20])
def lowerCAmelCase_ ( A_ ,A_ ,A_):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config ,"IN_MEMORY_MAX_SIZE" ,A_)
UpperCamelCase__: List[str] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase__: List[Any] = dataset_size < in_memory_max_size
else:
UpperCamelCase__: int = False
UpperCamelCase__: int = is_small_dataset(A_)
assert result == expected
| 221 | 1 |
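For reference, a plausible minimal implementation of the helper under test, inferred from the assertions above; the real `datasets.utils.info_utils.is_small_dataset` may differ in detail:

```python
import datasets.config

def is_small_dataset_sketch(dataset_size):
    """True iff the size is known and strictly below the in-memory threshold."""
    max_size = datasets.config.IN_MEMORY_MAX_SIZE
    return bool(dataset_size and max_size and dataset_size < max_size)
```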
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 'data2vec-vision'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation( self ):
return 1E-4
| 695 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
    '''simple docstring'''
    a : int = 0
    b : bool = False
    c : float = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_kwargs_handler( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
    def test_grad_scaler_kwargs( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
    def test_ddp_kwargs( self ):
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_0_0, 2_0_0)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
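# Minimal sketch of the `to_kwargs` behaviour exercised above: a dataclass reports only
# the fields that differ from its defaults. This mirrors the accelerate KwargsHandler
# idea but is a standalone toy reimplementation, not the library class.
from dataclasses import dataclass, fields

@dataclass
class SketchHandler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        default = SketchHandler()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }

assert SketchHandler().to_kwargs() == {}
assert SketchHandler(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}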
| 695 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __snake_case ( PretrainedConfig ):
    model_type = """deformable_detr"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=3_00 , max_position_embeddings=10_24 , encoder_layers=6 , encoder_ffn_dim=10_24 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=10_24 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=3_00 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ) -> None:
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config , dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
@property
    def num_attention_heads( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
    def hidden_size( self) -> int:
'''simple docstring'''
return self.d_model
    def to_dict( self) -> dict:
'''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
return output
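# Minimal sketch of the nested-config serialisation pattern implemented in to_dict()
# above: a child config object is flattened to a plain dict so the parent dict stays
# JSON-serialisable. Standalone toy classes, not the transformers ones.
import copy

class ChildConfig:
    model_type = "toy-backbone"
    def __init__(self, depth=4):
        self.depth = depth
    def to_dict(self):
        return {"model_type": self.model_type, "depth": self.depth}

class ParentConfig:
    model_type = "toy-detector"
    def __init__(self, backbone_config=None):
        self.backbone_config = backbone_config
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

assert ParentConfig(ChildConfig()).to_dict() == {
    "backbone_config": {"model_type": "toy-backbone", "depth": 4},
    "model_type": "toy-detector",
}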
| 217 |
"""simple docstring"""
def __a ( input_str , use_pascal = False ) ->str:
    if not isinstance(input_str , str ):
        msg = F'Expected string as input, found {type(input_str )}'
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = F'Expected boolean as use_pascal parameter, found {type(use_pascal )}'
        raise ValueError(msg )
    words = input_str.split('_' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
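# Illustrative calls (behaviour follows directly from the converter above):
#   __a("hello_world")                  -> "helloWorld"   (camel case)
#   __a("hello_world", use_pascal=True) -> "HelloWorld"   (pascal case)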
| 217 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
"""simple docstring"""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_padding( self , max_length=15 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
def __lowerCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy( OpenAIGPTTokenizationTest ):
    '''simple docstring'''
    pass
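# Minimal sketch of the byte-pair merge loop that the vocab/merges fixtures above
# exercise: repeatedly fuse the adjacent pair with the best (lowest) merge rank.
# Standalone toy code, not the actual tokenizer implementation.
def bpe_sketch(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while True:
        pairs = [(ranks.get(p, float("inf")), i) for i, p in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs, default=(float("inf"), -1))
        if best_rank == float("inf"):
            return symbols
        symbols[i : i + 2] = ["".join(symbols[i : i + 2])]

assert bpe_sketch("lower", ["l o", "lo w", "e r</w>"]) == ["low", "er</w>"]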
| 580 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader( AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , field : Optional[str] = None , num_proc : Optional[int] = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read( self ):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
class JsonDatasetWriter:
    '''simple docstring'''
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , num_proc : Optional[int] = None , **to_json_kwargs , ) -> None:
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs
    def write( self ):
        """simple docstring"""
        self.to_json_kwargs.pop('path_or_buf' , None )
        orient = self.to_json_kwargs.pop('orient' , 'records' )
        lines = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
        index = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
        compression = self.to_json_kwargs.pop('compression' , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , 'wb' , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    ' was passed. Please provide a local path instead.' )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
return written
    def _batch_json( self , args ):
        """simple docstring"""
        offset , orient , lines , index , to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
    def _write( self , file_obj : BinaryIO , orient , lines , index , **to_json_kwargs , ) -> int:
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                    written += file_obj.write(json_str )
return written
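# Hedged usage sketch via the public entry point (assuming the standard
# `datasets.Dataset.to_json`, which dispatches to a writer like the one above):
#
#   from datasets import Dataset
#   Dataset.from_dict({"text": ["a", "b"]}).to_json("out.jsonl", lines=True, orient="records")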
| 580 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = 'google/mobilebert-uncased'
    def setUp(self ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self , tokenizer ):
        """simple docstring"""
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer()
_a = '''UNwant\u00E9d,running'''
_a = tokenizer.tokenize(A__ )
_a = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_a = tokenizer.encode(A__ , add_special_tokens=A__ )
_a = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_a = self.get_rust_tokenizer()
_a = tokenizer.encode(A__ )
_a = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
# With lower casing
_a = self.get_tokenizer(do_lower_case=A__ )
_a = self.get_rust_tokenizer(do_lower_case=A__ )
_a = '''UNwant\u00E9d,running'''
_a = tokenizer.tokenize(A__ )
_a = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_a = tokenizer.encode(A__ , add_special_tokens=A__ )
_a = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_a = self.get_rust_tokenizer()
_a = tokenizer.encode(A__ )
_a = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
def a__ (self ) -> Tuple:
"""simple docstring"""
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def a__ (self ) -> List[Any]:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def a__ (self ) -> Any:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a__ (self ) -> Any:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a__ (self ) -> str:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a__ (self ) -> Dict:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a__ (self ) -> str:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer(self ):
        """simple docstring"""
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def a__ (self ) -> str:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def a__ (self ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
    def test_sequence_builders(self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def a__ (self ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_a = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_a = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(
A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
_a = tokenizer_r.do_lower_case if hasattr(A__ , '''do_lower_case''' ) else False
_a = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ['''的''', '''人''', '''有''']
_a = ''''''.join(A__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_a = True
_a = self.tokenizer_class.from_pretrained(A__ , **A__ )
_a = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_a = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_a = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_a = tokenizer_r.convert_ids_to_tokens(A__ )
_a = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
_a = False
_a = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_a = self.tokenizer_class.from_pretrained(A__ , **A__ )
_a = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_a = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_a = tokenizer_r.convert_ids_to_tokens(A__ )
_a = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that only the first Chinese character is not preceded by "##".
_a = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(A__ )
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
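# Minimal sketch of greedy longest-match-first WordPiece, the algorithm exercised by
# the WordpieceTokenizer tests above. Toy reimplementation, not the transformers one.
def wordpiece_sketch(token, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(token):
        end = len(token)
        while end > start:
            piece = ("##" if start > 0 else "") + token[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return [unk]  # no sub-token matched: the whole word becomes [UNK]
        start = end
    return pieces

demo_vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_sketch("unwanted", demo_vocab) == ["un", "##want", "##ed"]
assert wordpiece_sketch("unwantedX", demo_vocab) == ["[UNK]"]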
| 700 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ):
        """simple docstring"""
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self ):
        """simple docstring"""
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )
@property
    def image_processor_dict(self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''crop_size''' ) )
    def test_image_processor_from_dict_with_kwargs(self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
    def test_call_pil(self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
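# Minimal sketch of the shortest-edge resize + center crop that the tests above check;
# PIL-based toy code using the tester defaults (shortest_edge=20, crop 18x18), not the
# transformers image processor itself.
from PIL import Image as PILImage

def resize_and_center_crop(image, shortest_edge=20, crop=18):
    w, h = image.size
    scale = shortest_edge / min(w, h)           # make the shorter side equal shortest_edge
    image = image.resize((round(w * scale), round(h * scale)))
    w, h = image.size
    left, top = (w - crop) // 2, (h - crop) // 2
    return image.crop((left, top, left + crop, top + crop))

out = resize_and_center_crop(PILImage.new("RGB", (30, 40)))
assert out.size == (18, 18)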
| 352 | 0 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def UpperCamelCase__ ( __magic_name__ : int ) -> List[Any]: # picklable for multiprocessing
'''simple docstring'''
return x.sum()
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[int]: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@dataclass
class A:
    '''simple docstring'''
    x : int
    y : str
class __snake_case ( TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : str = {}
snake_case__ : Tuple = []
snake_case__ : List[Any] = 1
snake_case__ : Union[str, Any] = [1, 2]
snake_case__ : str = {"""a""": 1, """b""": 2}
snake_case__ : Dict = {"""a""": [1, 2], """b""": [3, 4]}
snake_case__ : Dict = {"""a""": {"""1""": 1}, """b""": 2}
snake_case__ : Optional[int] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
snake_case__ : int = {}
snake_case__ : Tuple = []
snake_case__ : Optional[Any] = 2
snake_case__ : Tuple = [2, 3]
snake_case__ : str = {"""a""": 2, """b""": 3}
snake_case__ : List[Any] = {"""a""": [2, 3], """b""": [4, 5]}
snake_case__ : Union[str, Any] = {"""a""": {"""1""": 2}, """b""": 3}
snake_case__ : Optional[int] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = 2
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
snake_case__ : int = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
snake_case__ : int = {"""a""": 2, """b""": 0, """c""": 2}
snake_case__ : Union[str, Any] = {
"""a""": np.eye(2 ).astype(__SCREAMING_SNAKE_CASE ),
"""b""": np.zeros(3 ).astype(__SCREAMING_SNAKE_CASE ),
"""c""": np.ones(2 ).astype(__SCREAMING_SNAKE_CASE ),
}
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , map_numpy=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , map_numpy=__SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , map_numpy=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , map_numpy=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__SCREAMING_SNAKE_CASE ): # can't pickle a local lambda
map_nested(lambda __SCREAMING_SNAKE_CASE : x + 1 , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE )
    def test_zip_dict( self ):
        d1 = {"""a""": 1, """b""": 2}
        d2 = {"""a""": 3, """b""": 4}
        d3 = {"""a""": 5, """b""": 6}
        expected_zip = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip )
    def test_temporary_assignment( self ):
        class Foo:
            my_attr = """bar"""
        foo = Foo()
        self.assertEqual(foo.my_attr , """bar""" )
        with temporary_assignment(foo , """my_attr""" , """BAR""" ):
            self.assertEqual(foo.my_attr , """BAR""" )
        self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc( iterable_length , num_proc , expected_num_proc ):
    '''simple docstring'''
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length )}
        map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __snake_case ( TestCase ):
'''simple docstring'''
    @require_tf
    def test_tensorflow( self ):
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(4_2 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(4_2 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    @require_torch
    def test_torch( self ):
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(4_2 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(4_2 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def test_numpy( self ):
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(4_2 ):
            out1 = gen_random_output()
        with temp_seed(4_2 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def test_nested_data_structure_data( input_data ):
    '''simple docstring'''
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten( data , expected_output ):
    '''simple docstring'''
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict():
    '''simple docstring'''
    input_ = A(x=1 , y="""foobar""" )
    expected_output = {"""x""": 1, """y""": """foobar"""}
    assert asdict(input_ ) == expected_output
    nested_input = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
    expected_output = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
    assert asdict(nested_input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="""foo""" )] )
def _split_text( text ):
    '''simple docstring'''
    return text.split()
def _aseconds_generator_of_aitems_with_timing( content ):
    '''simple docstring'''
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered():
    '''simple docstring'''
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content )
        assert out.count("""a""" ) == 2
        assert out.count("""b""" ) == 2
        assert len(out ) == 4
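# Minimal sketch of the map_nested contract exercised in this file: apply a function to
# every leaf of a nested dict/list structure. Standalone toy version, not datasets' own.
def map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return [map_nested_sketch(fn, v) for v in data]
    return fn(data)

assert map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": {"1": 1}}) == {"a": [2, 3], "b": {"1": 2}}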
| 38 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots( a : int , b : int , c : int ):
    """simple docstring"""
    if a == 0:
        raise ValueError("""Coefficient 'a' must not be zero.""" )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    """simple docstring"""
    solution1 , solution2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F"""The solutions are: {solution1} and {solution2}""" )
if __name__ == "__main__":
main()
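# Worked check of the demo above: for a=5, b=6, c=1 the discriminant is
# 6*6 - 4*5*1 = 16, so the roots are (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0.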
| 419 | 0 |
'''simple docstring'''
def decimal_isolate( number, digit_amount ):
    """simple docstring"""
    if digit_amount > 0:
        return round(number - int(number ), digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
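# Expected demo values (up to float rounding): 0.53, 0.3, 0.34, 0.345, -0.789, 0,
# -0.1, -0.12, -0.123. The function returns the signed fractional part: int() truncates
# toward zero, and round() is applied only when digit_amount > 0.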
| 703 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 16
a_ = 32
def get_fold_dataloaders( accelerator, dataset, train_idxs, valid_idxs, batch_size = 1_6 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs ),
            "validation": dataset["train"].select(valid_idxs ),
            "test": dataset["validation"],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
return train_dataloader, eval_dataloader, test_dataloader
def training_function( config, args ):
    """simple docstring"""
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc" )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue", "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows ), datasets["train"]["label"] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader , eval_dataloader , test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=1_0_0, num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps, )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0 )
    preds = torch.stack(test_predictions, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds, references=test_references )
    accelerator.print("Average test metrics from all folds:", test_metric )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
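# Example launch (assumes the `accelerate` CLI is installed; the script filename below is illustrative):
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16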
if __name__ == "__main__":
main()
| 92 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
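# Registering the legacy `data_utils`/`vocabulary` module names in `sys.modules` below lets
# `pickle.load` resolve the module paths that old corpus pickles reference.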
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        tf_path = os.path.abspath(tf_checkpoint_path)
        config_path = os.path.abspath(transfo_xl_config_file)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 57 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
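# Lazy import structure: submodule contents are declared here and only imported on first access,
# which keeps importing this package cheap when torch/TF are not actually needed.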
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253 | 0 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
UpperCamelCase__ : Union[str, Any] =torch.Size([1, 4_0_0] )
UpperCamelCase__ : List[Any] =torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
UpperCamelCase__ : Any =torch.Size([1, 1_7_4] )
UpperCamelCase__ : List[Any] =torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
UpperCamelCase__ : Optional[int] =torch.Size([1, 1_4_0_8, 1_5_3_6] )
UpperCamelCase__ : List[str] =torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
UpperCamelCase__ : Union[str, Any] =torch.Size([1, 1_4_0_8, 1_5_3_6] )
UpperCamelCase__ : List[str] =torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
UpperCamelCase__ : Dict =torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
UpperCamelCase__ : Dict =torch.Size([1, 1_4_0_8, 1_5_3_6] )
UpperCamelCase__ : str =torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
UpperCamelCase__ : int =torch.Size([1, 4_0_0] )
UpperCamelCase__ : int =torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
UpperCamelCase__ : str =torch.Size([1, 4_0_0] )
UpperCamelCase__ : List[Any] =torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
UpperCamelCase__ : Optional[Any] =torch.Size([1, 4_0_0] )
UpperCamelCase__ : Optional[int] =torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
UpperCamelCase__ : Union[str, Any] =torch.Size([1, 4_0_0] )
UpperCamelCase__ : List[Any] =torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
UpperCamelCase__ : str =torch.Size([1, 1_4_0_8, 1_5_3_6] )
UpperCamelCase__ : Optional[int] =torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
UpperCamelCase__ : List[str] =torch.Size([1, 1_7_4] )
UpperCamelCase__ : Optional[int] =torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
UpperCamelCase__ : str =torch.Size([1, 1_4_0_8, 1_5_3_6] )
UpperCamelCase__ : Any =torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
UpperCamelCase__ : Union[str, Any] =torch.Size([1, 1_7_4] )
UpperCamelCase__ : int =torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , A_ , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , A_ , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
UpperCamelCase__ : str =outputs.loss
assert torch.allclose(A_ , A_ , atol=1E-4 )
print("Loss ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 582 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
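# Typical usage (assuming a CLIP checkpoint such as "openai/clip-vit-base-patch32" is available):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")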
| 582 | 1 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
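# Helpers to locate the most recent scheduled (daily) CI workflow run of huggingface/transformers
# on GitHub Actions and download/read its artifacts.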
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # runs are returned newest first, so the first completed one is the latest daily run
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Get the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the content of the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
| 465 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : Union[str, Any] = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 564 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 38 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 38 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
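    # `list_str` lets argparse accept a comma-separated list, e.g. `--targets DeprecationWarning,UserWarning`.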
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 113 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))
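# Example: exponential_linear_unit(np.array([2.3, 0.6, -2, -3.8]), 0.3) gives approximately
# array([2.3, 0.6, -0.2594, -0.2933]); negative inputs saturate toward -alpha.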
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance):
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source, destination, graph_forward, graph_backward):
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        # Stop once the two search frontiers can no longer improve on the best meeting point.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
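# On the graphs above the shortest E -> F distance is min(E->B->C->D->F = 4, E->G->F = 3) = 3,
# so bidirectional_dij("E", "F", graph_fwd, graph_bwd) should return 3.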
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline using any `AutoModelForImageClassification`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 367 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 21 |
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_data: bytes) -> str:
    """Decode Base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded_data).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
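    # Round trip: base64_decode(base64_encode(s)) == s for any UTF-8 string s.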
| 371 | 0 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
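# e.g. get_module_path("tests/models/bert/test_modeling_bert.py") -> "tests.models.bert.test_modeling_bert"
# (assuming "/" is the OS path separator)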
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 717 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
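# `betas_for_alpha_bar` discretizes a continuous alpha_bar(t) schedule into per-step betas via
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at `max_beta`, so that the cumulative
# product of (1 - beta_i) tracks the chosen alpha_bar curve (cosine or exponential).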
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
def _UpperCAmelCase ( self : Dict , lowercase__ : torch.FloatTensor , lowercase__ : torch.FloatTensor , lowercase__ : torch.IntTensor , ) ->torch.FloatTensor:
"""simple docstring"""
_lowercase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype)
_lowercase = timesteps.to(original_samples.device)
_lowercase = alphas_cumprod[timesteps] ** 0.5
_lowercase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
_lowercase = sqrt_alpha_prod.unsqueeze(-1)
_lowercase = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowercase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
_lowercase = sqrt_one_minus_alpha_prod.unsqueeze(-1)
_lowercase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
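# Hedged sketch (not the library's API): the add-noise routine above implements the
# closed-form forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
# Names are illustrative; `alphas_cumprod` is assumed to be the 1-D tensor of
# cumulative alpha products used by the scheduler above.
import torch

def add_noise_sketch(x0, noise, timesteps, alphas_cumprod):
    # Gather alpha_bar_t per sample and reshape so it broadcasts over (B, C, H, W).
    alpha_bar = alphas_cumprod[timesteps].view(-1, *([1] * (x0.dim() - 1)))
    return alpha_bar.sqrt() * x0 + (1 - alpha_bar).sqrt() * noise

# Example: noise a batch of two 1x4x4 samples at timesteps 10 and 500.
alphas_cumprod = torch.cumprod(1 - torch.linspace(1e-4, 0.02, 1000), dim=0)
x0 = torch.randn(2, 1, 4, 4)
noisy = add_noise_sketch(x0, torch.randn_like(x0), torch.tensor([10, 500]), alphas_cumprod)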
| 572 | 0 |
def _SCREAMING_SNAKE_CASE ( __lowercase : int ) -> int:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
__A = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
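# De-obfuscated sketch of the routine above (Brian Kernighan's trick): n &= n - 1
# clears the lowest set bit, so the loop runs once per 1-bit rather than once per bit.
def count_set_bits(number):
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count

assert count_set_bits(0b10110) == 3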
| 637 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a : List[str] = "▁"
__a : int = {"vocab_file": "spiece.model"}
__a : Any = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
__a : Any = {
"google/pegasus-xsum": 512,
}
__a : Union[str, Any] = logging.get_logger(__name__)
class __lowercase ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Dict="<pad>" , UpperCamelCase_ : Dict="</s>" , UpperCamelCase_ : Optional[Any]="<unk>" , UpperCamelCase_ : List[Any]="<mask_2>" , UpperCamelCase_ : List[Any]="<mask_1>" , UpperCamelCase_ : str=None , UpperCamelCase_ : Dict=103 , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : int , ):
"""simple docstring"""
__A = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError(
F"additional_special_tokens should be of type {type(UpperCamelCase_ )}, but is"
F" {type(UpperCamelCase_ )}" )
__A = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"<unk_{i}>" for i in range(len(UpperCamelCase_ ) , self.offset - 1 )
]
if len(set(UpperCamelCase_ ) ) != len(UpperCamelCase_ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
__A = additional_special_tokens_extended
else:
__A = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )]
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__A = mask_token_sent
__A = vocab_file
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
# add special tokens to encoder dict
__A = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__A = {v: k for k, v in self.encoder.items()}
@property
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
return len(self.sp_model ) + self.offset
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
__A = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
"""simple docstring"""
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Tuple , UpperCamelCase_ : Dict ):
"""simple docstring"""
__A = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : str ):
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCAmelCase_ ( self : str , UpperCamelCase_ : str ):
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__A = self.sp_model.piece_to_id(UpperCamelCase_ )
return sp_id + self.offset
def lowerCAmelCase_ ( self : int , UpperCamelCase_ : int ):
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__A = self.sp_model.IdToPiece(index - self.offset )
return token
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase_ : List[str] ):
"""simple docstring"""
__A = []
__A = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
__A = []
else:
current_sub_tokens.append(UpperCamelCase_ )
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : List[Any]=False ):
"""simple docstring"""
return 1
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase_ : Tuple ):
"""simple docstring"""
__A = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCAmelCase_ ( self : int , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase_ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any]=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__A = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , """wb""" ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
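# Hedged sketch of the id-offset scheme above: the first `offset` ids (pad, eos and
# the <mask_*>/<unk_*> sentinels) live outside the SentencePiece model, so both
# conversion directions shift by `offset`. The callables and dicts are stand-ins.
def token_to_id_sketch(sp_piece_to_id, special_decoder, token, offset=103):
    if token in special_decoder:
        return special_decoder[token]
    return sp_piece_to_id(token) + offset  # plain pieces are shifted upward

def id_to_token_sketch(sp_id_to_piece, special_encoder, index, offset=103):
    if index in special_encoder:
        return special_encoder[index]
    return sp_id_to_piece(index - offset)  # undo the shift for plain pieces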
| 637 | 1 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def __snake_case ( _UpperCAmelCase : np.ndarray):
UpperCamelCase , UpperCamelCase , UpperCamelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b
def __snake_case ( _UpperCAmelCase : np.ndarray):
return (gray > 127) & (gray <= 255)
def __snake_case ( _UpperCAmelCase : np.ndarray, _UpperCAmelCase : np.ndarray):
UpperCamelCase = np.zeros_like(_UpperCAmelCase)
UpperCamelCase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
# Copy image to padded image
UpperCamelCase = image
# Iterate over image & apply kernel
for x in range(image.shape[1]):
for y in range(image.shape[0]):
UpperCamelCase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
UpperCamelCase = int(summation > 0)
return output
if __name__ == "__main__":
# read original image
snake_case_ : Union[str, Any] = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
snake_case_ : Any = np.array(Image.open(lena_path))
# kernel to be applied
snake_case_ : Union[str, Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
snake_case_ : Union[str, Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
snake_case_ : Any = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
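# De-obfuscated sketch of the binary dilation above: a pixel turns on whenever the
# structuring element, centred on it, overlaps any foreground pixel.
import numpy as np

def dilate(image, kernel):
    kh, kw = kernel.shape
    out = np.zeros_like(image)
    padded = np.zeros((image.shape[0] + kh - 1, image.shape[1] + kw - 1))
    padded[kh // 2 : kh // 2 + image.shape[0], kw // 2 : kw // 2 + image.shape[1]] = image
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            out[y, x] = int((kernel * padded[y : y + kh, x : x + kw]).sum() > 0)
    return out

# A single pixel grows into a plus shape under the cross-shaped element above.
img = np.zeros((5, 5), dtype=int)
img[2, 2] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
assert dilate(img, cross)[1, 2] == 1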
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case_ : List[Any] = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
snake_case_ : str = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
snake_case_ : Optional[int] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
snake_case_ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
snake_case_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
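# Hedged sketch of the lazy-import pattern above: attribute access triggers the real
# submodule import and caches the result, keeping top-level imports cheap. This is a
# generic illustration, not transformers' actual _LazyModule implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule} for O(1) lookups
        self._name_to_submodule = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, name):
        submodule = self._name_to_submodule.get(name)
        if submodule is None:
            raise AttributeError(f"module {self.__name__} has no attribute {name}")
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), name)
        setattr(self, name, value)  # cache so the next access is a plain attribute hit
        return value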
| 350 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] ):
debug_launcher(test_script.main )
def UpperCamelCase_ ( self : List[Any] ):
debug_launcher(test_ops.main )
| 55 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class A_ ( _a ):
'''simple docstring'''
a__ = CustomTokenizer
pass
| 303 | 0 |
'''simple docstring'''
UpperCamelCase_ : str = {
"""km/h""": 1.0,
"""m/s""": 3.6,
"""mph""": 1.6_0_9_3_4_4,
"""knot""": 1.8_5_2,
}
UpperCamelCase_ : Dict = {
"""km/h""": 1.0,
"""m/s""": 0.2_7_7_7_7_7_7_7_8,
"""mph""": 0.6_2_1_3_7_1_1_9_2,
"""knot""": 0.5_3_9_9_5_6_8_0_3,
}
def _lowerCAmelCase (_lowercase , _lowercase , _lowercase ):
"""simple docstring"""
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
a__ = (
F'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
F'Valid values are: {", ".join(__snake_case )}'
)
raise ValueError(__snake_case )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
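# De-obfuscated sketch of the converter above: normalise to km/h with the first
# chart, then into the target unit with the inverse chart.
def convert_speed_sketch(speed, unit_from, unit_to):
    to_kmh = {"km/h": 1.0, "m/s": 3.6, "mph": 1.609344, "knot": 1.852}
    from_kmh = {"km/h": 1.0, "m/s": 0.277777778, "mph": 0.621371192, "knot": 0.539956803}
    if unit_from not in to_kmh or unit_to not in from_kmh:
        raise ValueError(f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}")
    return round(speed * to_kmh[unit_from] * from_kmh[unit_to], 3)

assert convert_speed_sketch(100, "km/h", "m/s") == 27.778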
| 712 |
'''simple docstring'''
def _lowerCAmelCase (_lowercase = 4_00_00_00 ):
"""simple docstring"""
a__ = [0, 1]
a__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
a__ = 0
for j in range(len(_lowercase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F"{solution() = }")
| 394 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = 'swin2sr'
SCREAMING_SNAKE_CASE_: Dict = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , lowerCAmelCase_=6_4 , lowerCAmelCase_=1 , lowerCAmelCase_=3 , lowerCAmelCase_=1_8_0 , lowerCAmelCase_=[6, 6, 6, 6, 6, 6] , lowerCAmelCase_=[6, 6, 6, 6, 6, 6] , lowerCAmelCase_=8 , lowerCAmelCase_=2.0 , lowerCAmelCase_=True , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_="gelu" , lowerCAmelCase_=False , lowerCAmelCase_=0.02 , lowerCAmelCase_=1e-5 , lowerCAmelCase_=2 , lowerCAmelCase_=1.0 , lowerCAmelCase_="1conv" , lowerCAmelCase_="pixelshuffle" , **lowerCAmelCase_ , ) -> Dict:
super().__init__(**lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = image_size
_SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
_SCREAMING_SNAKE_CASE : str = num_channels
_SCREAMING_SNAKE_CASE : Any = embed_dim
_SCREAMING_SNAKE_CASE : Optional[int] = depths
_SCREAMING_SNAKE_CASE : List[Any] = len(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Tuple = num_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = window_size
_SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
_SCREAMING_SNAKE_CASE : List[str] = qkv_bias
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate
_SCREAMING_SNAKE_CASE : int = hidden_act
_SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
_SCREAMING_SNAKE_CASE : str = layer_norm_eps
_SCREAMING_SNAKE_CASE : List[Any] = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = upscale
_SCREAMING_SNAKE_CASE : str = img_range
_SCREAMING_SNAKE_CASE : Any = resi_connection
_SCREAMING_SNAKE_CASE : Tuple = upsampler
| 621 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def lowercase__ ( ):
_SCREAMING_SNAKE_CASE : dict[int, int] = {}
_SCREAMING_SNAKE_CASE : List[Any] = 2
while True:
_SCREAMING_SNAKE_CASE : List[Any] = factor_map.pop(lowerCamelCase, lowerCamelCase )
if factor:
_SCREAMING_SNAKE_CASE : str = factor + prime
while x in factor_map:
x += factor
_SCREAMING_SNAKE_CASE : Union[str, Any] = factor
else:
_SCREAMING_SNAKE_CASE : Optional[int] = prime
yield prime
prime += 1
def lowercase__ ( lowerCamelCase = 1E10 ):
_SCREAMING_SNAKE_CASE : Dict = sieve()
_SCREAMING_SNAKE_CASE : Dict = 1
while True:
_SCREAMING_SNAKE_CASE : int = next(lowerCamelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(lowerCamelCase )
n += 2
if __name__ == "__main__":
print(solution())
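# De-obfuscated sketch of the unbounded incremental sieve above (a standard variant
# that schedules each prime at its square): composites are looked up in O(1) via
# `factor_map`, so no fixed upper bound is needed.
def prime_generator():
    factor_map = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:  # composite: reschedule its factor at the next free multiple
            x = candidate + factor
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:  # prime: its first unmarked multiple of interest is its square
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1

import itertools
assert list(itertools.islice(prime_generator(), 5)) == [2, 3, 5, 7, 11]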
| 621 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a (__A , __A , __A , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = StableUnCLIPImgaImgPipeline
__UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__UpperCAmelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : Dict = frozenset([] )
def __snake_case ( self : Any ) -> List[Any]:
__snake_case : List[Any] = 32
__snake_case : Optional[Any] = embedder_hidden_size
# image encoding components
__snake_case : List[str] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__snake_case : str = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__snake_case : int = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__snake_case : Union[str, Any] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__snake_case : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__snake_case : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__snake_case : int = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__snake_case : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__snake_case : str = AutoencoderKL()
__snake_case : int = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def __snake_case ( self : Any , lowerCamelCase : List[str] , lowerCamelCase : List[Any]=0 , lowerCamelCase : int=True ) -> Optional[Any]:
if str(lowerCamelCase ).startswith("mps" ):
__snake_case : List[str] = torch.manual_seed(lowerCamelCase )
else:
__snake_case : Any = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__snake_case : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__snake_case : str = input_image * 0.5 + 0.5
__snake_case : Tuple = input_image.clamp(0 , 1 )
__snake_case : Tuple = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__snake_case : Dict = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __snake_case ( self : Dict ) -> Dict:
__snake_case : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case : Optional[int] = self.get_dummy_components()
__snake_case : List[Any] = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__snake_case : str = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Optional[Any] = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__snake_case : int = sd_pipe(**lowerCamelCase ).images
__snake_case : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case : str = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __snake_case ( self : Dict ) -> Tuple:
__snake_case : Union[str, Any] = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def __snake_case ( self : List[Any] ) -> Optional[int]:
__snake_case : int = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __snake_case ( self : Optional[Any] ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Optional[int] ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : List[Any] ) -> Union[str, Any]:
__snake_case : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__snake_case : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__snake_case : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
        __snake_case : Dict = pipe(lowerCamelCase , "anime turtle" , generator=lowerCamelCase , output_type="np" )
__snake_case : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def __snake_case ( self : Optional[Any] ) -> Optional[int]:
__snake_case : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__snake_case : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__snake_case : Tuple = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case : Any = torch.Generator(device="cpu" ).manual_seed(0 )
        __snake_case : Optional[Any] = pipe(lowerCamelCase , "anime turtle" , generator=lowerCamelCase , output_type="np" )
__snake_case : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def __snake_case ( self : str ) -> Tuple:
__snake_case : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case : Any = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__snake_case : List[Any] = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case : List[str] = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__snake_case : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 708 |
from maths.prime_check import is_prime
def lowerCAmelCase_ ( __lowerCamelCase ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
__snake_case : List[str] = F'Input value of [number={number}] must be an integer'
raise TypeError(__lowerCamelCase )
if is_prime(__lowerCamelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
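# De-obfuscated sketch of the helper above, with a self-contained trial-division
# test standing in for maths.prime_check.is_prime.
def is_prime_sketch(n):
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

def twin_prime_sketch(number):
    # number + 2 when (number, number + 2) is a twin-prime pair, else -1
    if is_prime_sketch(number) and is_prime_sketch(number + 2):
        return number + 2
    return -1

assert twin_prime_sketch(5) == 7 and twin_prime_sketch(8) == -1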
| 203 | 0 |
def __a ( ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 1
while len(A__ ) < 1E6:
constant.append(str(A__ ) )
i += 1
SCREAMING_SNAKE_CASE = "".join(A__ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
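# De-obfuscated sketch of the lookup above (Project Euler #40) using digit-block
# arithmetic instead of materialising a million-character string; names illustrative.
def champernowne_digit(position):
    digits, count, start = 1, 9, 1
    while position > digits * count:  # skip the whole block of d-digit numbers
        position -= digits * count
        digits, count, start = digits + 1, count * 10, start * 10
    number = start + (position - 1) // digits
    return int(str(number)[(position - 1) % digits])

assert champernowne_digit(12) == 1  # constant 0.123456789101112...: 12th digit is 1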
if __name__ == "__main__":
    print(solution())
| 16 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
def get_masked_lm_array(__magic_name__ ):
UpperCAmelCase : Tuple = F"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ )
if "kernel" in name:
UpperCAmelCase : str = array.transpose()
return torch.from_numpy(__magic_name__ )
def get_encoder_array(__magic_name__ ):
UpperCAmelCase : List[Any] = F"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
UpperCAmelCase : Optional[Any] = tf.train.load_variable(__magic_name__ , __magic_name__ )
if "kernel" in name:
UpperCAmelCase : str = array.transpose()
return torch.from_numpy(__magic_name__ )
def get_encoder_layer_array(__magic_name__ , __magic_name__ ):
UpperCAmelCase : Union[str, Any] = F"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
UpperCAmelCase : int = tf.train.load_variable(__magic_name__ , __magic_name__ )
if "kernel" in name:
UpperCAmelCase : Optional[int] = array.transpose()
return torch.from_numpy(__magic_name__ )
def get_encoder_attention_layer_array(__magic_name__ , __magic_name__ , __magic_name__ ):
UpperCAmelCase : Tuple = F"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ )
UpperCAmelCase : int = array.reshape(__magic_name__ )
if "kernel" in name:
UpperCAmelCase : Optional[Any] = array.transpose()
return torch.from_numpy(__magic_name__ )
print(F"Loading model based on config from {config_path}..." )
UpperCAmelCase : Optional[Any] = BertConfig.from_json_file(__magic_name__ )
UpperCAmelCase : Optional[Any] = BertForMaskedLM(__magic_name__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
UpperCAmelCase : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
UpperCAmelCase : BertSelfAttention = layer.attention.self
UpperCAmelCase : List[Any] = get_encoder_attention_layer_array(
__magic_name__ , "_query_dense/kernel" , self_attn.query.weight.data.shape )
UpperCAmelCase : Tuple = get_encoder_attention_layer_array(
__magic_name__ , "_query_dense/bias" , self_attn.query.bias.data.shape )
UpperCAmelCase : int = get_encoder_attention_layer_array(
__magic_name__ , "_key_dense/kernel" , self_attn.key.weight.data.shape )
UpperCAmelCase : Optional[int] = get_encoder_attention_layer_array(
__magic_name__ , "_key_dense/bias" , self_attn.key.bias.data.shape )
UpperCAmelCase : Tuple = get_encoder_attention_layer_array(
__magic_name__ , "_value_dense/kernel" , self_attn.value.weight.data.shape )
UpperCAmelCase : str = get_encoder_attention_layer_array(
__magic_name__ , "_value_dense/bias" , self_attn.value.bias.data.shape )
# Self-attention Output
UpperCAmelCase : BertSelfOutput = layer.attention.output
UpperCAmelCase : str = get_encoder_attention_layer_array(
__magic_name__ , "_output_dense/kernel" , self_output.dense.weight.data.shape )
UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array(
__magic_name__ , "_output_dense/bias" , self_output.dense.bias.data.shape )
UpperCAmelCase : str = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/gamma" )
UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/beta" )
# Intermediate
UpperCAmelCase : BertIntermediate = layer.intermediate
UpperCAmelCase : Dict = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/kernel" )
UpperCAmelCase : Tuple = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/bias" )
# Output
UpperCAmelCase : BertOutput = layer.output
UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/kernel" )
UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/bias" )
UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/gamma" )
UpperCAmelCase : Any = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/beta" )
# Embeddings
UpperCAmelCase : int = get_encoder_array("_position_embedding_layer/embeddings" )
UpperCAmelCase : str = get_encoder_array("_type_embedding_layer/embeddings" )
UpperCAmelCase : Optional[Any] = get_encoder_array("_embedding_norm_layer/gamma" )
UpperCAmelCase : Any = get_encoder_array("_embedding_norm_layer/beta" )
# LM Head
UpperCAmelCase : str = model.cls.predictions.transform
UpperCAmelCase : List[Any] = get_masked_lm_array("dense/kernel" )
UpperCAmelCase : List[Any] = get_masked_lm_array("dense/bias" )
UpperCAmelCase : Optional[Any] = get_masked_lm_array("layer_norm/gamma" )
UpperCAmelCase : Union[str, Any] = get_masked_lm_array("layer_norm/beta" )
UpperCAmelCase : Optional[Any] = get_masked_lm_array("embedding_table" )
# Pooling
UpperCAmelCase : str = BertPooler(config=__magic_name__ )
UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/kernel" )
UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/bias" )
# Export final model
model.save_pretrained(__magic_name__ )
# Integration test - should load without any errors ;)
UpperCAmelCase : Optional[int] = BertForMaskedLM.from_pretrained(__magic_name__ )
print(new_model.eval() )
print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
a : Any = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
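# Hedged sketch of the core mapping rule in the script above: TF dense kernels are
# stored (in_features, out_features) while torch.nn.Linear.weight is the transpose.
# The random array stands in for a tensor read via tf.train.load_variable.
import numpy as np
import torch

def kernel_to_linear_weight(array):
    return torch.from_numpy(array.transpose())  # (in, out) -> (out, in)

linear = torch.nn.Linear(4, 3)
tf_kernel = np.random.randn(4, 3).astype(np.float32)  # illustrative stand-in
linear.weight.data = kernel_to_linear_weight(tf_kernel)
assert tuple(linear.weight.shape) == (3, 4)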
| 679 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE ( __A):
"""simple docstring"""
pass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> Union[str, Any]:
_lowerCAmelCase =data
_lowerCAmelCase =None
def __iter__( self ) -> Any:
_lowerCAmelCase =self
_lowerCAmelCase =[]
while node:
if node in visited:
raise ContainsLoopError
visited.append(__A )
yield node.data
_lowerCAmelCase =node.next_node
@property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
lowercase_ = Node(1)
lowercase_ = Node(2)
lowercase_ = Node(3)
lowercase_ = Node(4)
print(root_node.has_loop) # False
lowercase_ = root_node.next_node
print(root_node.has_loop) # True
lowercase_ = Node(5)
lowercase_ = Node(6)
lowercase_ = Node(5)
lowercase_ = Node(6)
print(root_node.has_loop) # False
lowercase_ = Node(1)
print(root_node.has_loop) # False
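# Constant-space alternative to the `visited` list above: Floyd's tortoise-and-hare
# detects a loop in O(n) time and O(1) memory. `_N` is a minimal stand-in for Node.
def has_loop_floyd(head):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:  # the hare lapped the tortoise inside a cycle
            return True
    return False

class _N:
    def __init__(self, data):
        self.data, self.next_node = data, None

a, b = _N(1), _N(2)
a.next_node, b.next_node = b, a  # wire a two-node loop
assert has_loop_floyd(a) is True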
| 703 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Dict = '''dandelin/vilt-b32-finetuned-vqa'''
_A : List[str] = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
_A : str = '''image_qa'''
_A : Union[str, Any] = AutoProcessor
_A : Optional[Any] = AutoModelForVisualQuestionAnswering
_A : List[str] = ['''image''', '''text''']
_A : Optional[int] = ['''text''']
def __init__( self : Tuple , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : "Image" , lowerCAmelCase__ : str ):
"""simple docstring"""
return self.pre_processor(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors="""pt""" )
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : Tuple ):
"""simple docstring"""
with torch.no_grad():
return self.model(**lowerCAmelCase__ ).logits
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.argmax(-1 ).item()
        return self.model.config.idalabel[idx]
| 578 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Any = '''unispeech'''
def __init__( self : Tuple , lowerCAmelCase__ : List[Any]=3_2 , lowerCAmelCase__ : Dict=7_6_8 , lowerCAmelCase__ : Tuple=1_2 , lowerCAmelCase__ : Dict=1_2 , lowerCAmelCase__ : int=3_0_7_2 , lowerCAmelCase__ : Union[str, Any]="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Optional[int]=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : str=0.02 , lowerCAmelCase__ : int=1E-5 , lowerCAmelCase__ : Dict="group" , lowerCAmelCase__ : Any="gelu" , lowerCAmelCase__ : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCAmelCase__ : Any=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__ : List[str]=False , lowerCAmelCase__ : List[Any]=1_2_8 , lowerCAmelCase__ : Union[str, Any]=1_6 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : List[str]=0.05 , lowerCAmelCase__ : Tuple=1_0 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : Dict=0.0 , lowerCAmelCase__ : Dict=1_0 , lowerCAmelCase__ : Union[str, Any]=0 , lowerCAmelCase__ : Any=3_2_0 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : int=1_0_0 , lowerCAmelCase__ : List[str]=2_5_6 , lowerCAmelCase__ : int=2_5_6 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : int="mean" , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=2_5_6 , lowerCAmelCase__ : int=8_0 , lowerCAmelCase__ : Union[str, Any]=0 , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : str=0.5 , **lowerCAmelCase__ : Tuple , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_norm
__SCREAMING_SNAKE_CASE : Tuple = feat_extract_activation
__SCREAMING_SNAKE_CASE : int = list(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = list(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = list(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = conv_bias
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE : Optional[Any] = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE : List[str] = len(self.conv_dim )
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : Any = intermediate_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_act
__SCREAMING_SNAKE_CASE : str = num_attention_heads
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout
__SCREAMING_SNAKE_CASE : Dict = attention_dropout
__SCREAMING_SNAKE_CASE : Tuple = activation_dropout
__SCREAMING_SNAKE_CASE : Tuple = feat_proj_dropout
__SCREAMING_SNAKE_CASE : Optional[int] = final_dropout
__SCREAMING_SNAKE_CASE : Tuple = layerdrop
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : int = num_ctc_classes
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Tuple = do_stable_layer_norm
__SCREAMING_SNAKE_CASE : int = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE : int = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE : List[str] = apply_spec_augment
__SCREAMING_SNAKE_CASE : str = mask_time_prob
__SCREAMING_SNAKE_CASE : Dict = mask_time_length
__SCREAMING_SNAKE_CASE : Optional[int] = mask_time_min_masks
__SCREAMING_SNAKE_CASE : Optional[Any] = mask_feature_prob
__SCREAMING_SNAKE_CASE : Dict = mask_feature_length
__SCREAMING_SNAKE_CASE : Any = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__SCREAMING_SNAKE_CASE : Tuple = num_codevectors_per_group
__SCREAMING_SNAKE_CASE : Optional[int] = num_codevector_groups
__SCREAMING_SNAKE_CASE : List[str] = contrastive_logits_temperature
__SCREAMING_SNAKE_CASE : Dict = feat_quantizer_dropout
__SCREAMING_SNAKE_CASE : int = num_negatives
__SCREAMING_SNAKE_CASE : Tuple = codevector_dim
__SCREAMING_SNAKE_CASE : Optional[int] = proj_codevector_dim
__SCREAMING_SNAKE_CASE : Optional[Any] = diversity_loss_weight
# ctc loss
__SCREAMING_SNAKE_CASE : Tuple = ctc_loss_reduction
__SCREAMING_SNAKE_CASE : List[str] = ctc_zero_infinity
# pretraining loss
__SCREAMING_SNAKE_CASE : Tuple = replace_prob
@property
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 578 | 1 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def lowerCAmelCase ( UpperCamelCase_: bytes ) -> str:
'''simple docstring'''
if len(A__ ) != 32:
raise ValueError("Input must be of length 32" )
_a = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowerCAmelCase ( UpperCamelCase_: int ) -> Any:
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
_a = format(A__ , "08x" )[-8:]
_a = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def lowerCAmelCase ( UpperCamelCase_: bytes ) -> Optional[int]:
'''simple docstring'''
_a = b""
for char in message:
bit_string += format(A__ , "08b" ).encode("utf-8" )
_a = format(len(A__ ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(A__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def lowerCAmelCase ( UpperCamelCase_: bytes ) -> Optional[int]:
'''simple docstring'''
if len(A__ ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(A__ ) , 512 ):
_a = bit_string[pos : pos + 512]
_a = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def lowerCAmelCase ( UpperCamelCase_: int ) -> Dict:
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
_a = format(A__ , "032b" )
_a = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(A__ , 2 )
def lowerCAmelCase ( UpperCamelCase_: int , UpperCamelCase_: int ) -> int:
'''simple docstring'''
return (a + b) % 2**32
def lowerCAmelCase ( UpperCamelCase_: int , UpperCamelCase_: int ) -> Optional[int]:
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def lowerCAmelCase ( UpperCamelCase_: bytes ) -> Optional[int]:
'''simple docstring'''
_a = preprocess(A__ )
_a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_a = 0X67452301
_a = 0Xefcdab89
_a = 0X98badcfe
_a = 0X10325476
_a = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(A__ ):
_a = aa
_a = ba
_a = ca
_a = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a = d ^ (b & (c ^ d))
_a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a = c ^ (d & (b ^ c))
_a = (5 * i + 1) % 16
elif i <= 47:
_a = b ^ c ^ d
_a = (3 * i + 5) % 16
else:
_a = c ^ (b | not_aa(A__ ))
_a = (7 * i) % 16
_a = (f + a + added_consts[i] + block_words[g]) % 2**32
_a = d
_a = c
_a = b
_a = sum_aa(A__ , left_rotate_aa(A__ , shift_amounts[i] ) )
# Add hashed chunk to running total
_a = sum_aa(A__ , A__ )
_a = sum_aa(A__ , A__ )
_a = sum_aa(A__ , A__ )
_a = sum_aa(A__ , A__ )
_a = reformat_hex(A__ ) + reformat_hex(A__ ) + reformat_hex(A__ ) + reformat_hex(A__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
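# Hedged sketch: the little-endian repacking above matches struct's "<I" layout, and
# a from-scratch digest can be cross-checked against hashlib. `md5_bytes` stands in
# for the (obfuscated) top-level digest function, assumed to return the hex digest
# as bytes, as the reformat_hex helper above suggests.
import hashlib
import struct

assert struct.pack("<I", 0x67452301).hex() == "01234567"  # word -> little-endian hex

def cross_check(md5_bytes, message=b"The quick brown fox"):
    return md5_bytes(message) == hashlib.md5(message).hexdigest().encode("utf-8")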
| 714 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ (_UpperCAmelCase ):
A__ : Union[str, Any] = (PNDMScheduler,)
A__ : Optional[int] = (('''num_inference_steps''', 50),)
def lowerCamelCase__ ( self , **a_ ) ->Optional[Any]:
'''simple docstring'''
_a = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a_ )
return config
def lowerCamelCase__ ( self , a_=0 , **a_ ) ->Tuple:
'''simple docstring'''
_a = dict(self.forward_default_kwargs )
_a = kwargs.pop("num_inference_steps" , a_ )
_a = self.dummy_sample
_a = 0.1 * sample
_a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_a = self.get_scheduler_config(**a_ )
_a = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_a = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_a = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_a = dummy_past_residuals[:]
_a = scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
_a = new_scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a = scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
_a = new_scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self ) ->Dict:
'''simple docstring'''
pass
def lowerCamelCase__ ( self , a_=0 , **a_ ) ->Tuple:
'''simple docstring'''
_a = dict(self.forward_default_kwargs )
_a = kwargs.pop("num_inference_steps" , a_ )
_a = self.dummy_sample
_a = 0.1 * sample
_a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_a = self.get_scheduler_config()
_a = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_a = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_a = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_a = dummy_past_residuals[:]
_a = scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
_a = new_scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a = scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
_a = new_scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self , **a_ ) ->Optional[int]:
'''simple docstring'''
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(**a_ )
_a = scheduler_class(**a_ )
_a = 1_0
_a = self.dummy_model()
_a = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.prk_timesteps ):
_a = model(a_ , a_ )
_a = scheduler.step_prk(a_ , a_ , a_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_a = model(a_ , a_ )
_a = scheduler.step_plms(a_ , a_ , a_ ).prev_sample
return sample
def lowerCamelCase__ ( self ) ->Dict:
'''simple docstring'''
_a = dict(self.forward_default_kwargs )
_a = kwargs.pop("num_inference_steps" , a_ )
for scheduler_class in self.scheduler_classes:
_a = self.get_scheduler_config()
_a = scheduler_class(**a_ )
_a = self.dummy_sample
_a = 0.1 * sample
if num_inference_steps is not None and hasattr(a_ , "set_timesteps" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_ , "set_timesteps" ):
_a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_a = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier versions of set_timesteps() failed when num_inference_steps was a power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # we specify a different beta schedule so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # we specify a different beta schedule so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 612 | 0 |
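# --- Illustrative sketch (added example; not part of the original tests) ---
# A minimal end-to-end PNDM denoising loop of the kind full_loop() above wraps.
# The "residual" below is a placeholder for a trained UNet's output, so the
# values are meaningless; only the scheduler mechanics are real.
import torch
from diffusers import PNDMScheduler

_sketch_scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02)
_sketch_scheduler.set_timesteps(num_inference_steps=10)

_sketch_sample = torch.randn(1, 3, 8, 8)  # stand-in for an initial noisy sample
for _t in _sketch_scheduler.timesteps:
    _residual = 0.1 * _sketch_sample  # placeholder for model(sample, t).sample
    _sketch_sample = _sketch_scheduler.step(_residual, _t, _sketch_sample).prev_sample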
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ) -> PredictionOutput:
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 103 |
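# --- Illustrative sketch (hypothetical usage, not part of the snippet above) ---
# Generation kwargs such as max_length/num_beams flow through evaluate() into
# the prediction loop. `model`, `training_args`, the datasets, and the
# post-processing/metric callables below are placeholders, not defined here.
#
# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate(max_length=64, num_beams=4, metric_key_prefix="eval")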
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 490 | 0 |
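# --- Illustrative sketch (hypothetical usage, not part of the file above) ---
# The _LazyModule wiring keeps the package import cheap: only the config
# symbols resolve eagerly, and the torch-backed classes are imported on first
# attribute access.
#
# from transformers import ViTMSNConfig, ViTMSNModel
#
# config = ViTMSNConfig()
# model = ViTMSNModel(config)  # this line triggers the lazy import of modeling_vit_msn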
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCamelCase : str = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 714 |
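# --- Illustrative sketch (not part of the test file above) ---
# The convention the test enforces: a definition annotated with "# Copied from"
# must stay identical to its source after the declared renames. The dataclass
# below is a hand-written example of a conforming copy, not real library code.
#
# # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->MyTest
# @dataclass
# class MyTestSchedulerOutput(BaseOutput):
#     prev_sample: torch.FloatTensor
#     pred_original_sample: Optional[torch.FloatTensor] = None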
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": F'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
"emoji": True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
with open(log, "r") as f:
for line in f:
            line = json.loads(line)
if line.get("nodeid", "") != "":
_lowerCamelCase : str = line["nodeid"]
if line.get("duration", None) is not None:
                    duration = F'''{line['duration']:.4f}'''
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
_lowerCamelCase : str = ""
_lowerCamelCase : Optional[Any] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
_lowerCamelCase : Union[str, Any] = test[0].split("::")
_lowerCamelCase : str = data[0].split("/")[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
_lowerCamelCase : List[str] = "Too many failed tests, please see the full report in the Action results."
_lowerCamelCase : Tuple = len(err) + 1_0
_lowerCamelCase : Union[str, Any] = message[: 3_0_0_0 - offset] + F'''\n...\n```\n{err}'''
print(F'''### {message}''')
else:
_lowerCamelCase : List[str] = "No failed tests! 🤗"
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
        action_button = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": F'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
payload.append(action_button)
        date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": F'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
_lowerCamelCase : Dict = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
else:
_lowerCamelCase : List[str] = ""
                payload = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 196 | 0 |
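# --- Illustrative sketch (hypothetical, not part of the script above) ---
# The report is assembled from Slack Block Kit "blocks". Posting a single
# section block looks like this; the channel name below is made up and the
# SLACK_API_TOKEN environment variable is assumed to be set.
#
# import os
# from slack_sdk import WebClient
#
# client = WebClient(token=os.environ["SLACK_API_TOKEN"])
# blocks = [{"type": "section", "text": {"type": "mrkdwn", "text": "*CI summary:* 0 failed tests"}}]
# client.chat_postMessage(channel="#my-ci-channel", text="CI summary", blocks=blocks)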
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Tuple , _a : List[str] , _a : List[Any]=13 , _a : Tuple=64 , _a : Any=2 , _a : str=3 , _a : Union[str, Any]=True , _a : Optional[Any]=True , _a : Any=32 , _a : int=5 , _a : Tuple=4 , _a : Dict=37 , _a : int="gelu" , _a : Optional[int]=0.1 , _a : Union[str, Any]=0.1 , _a : int=10 , _a : Optional[int]=0.02 , _a : List[str]=[1, 16, 4, 4] , _a : Union[str, Any]=None , ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =type_sequence_label_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_SCREAMING_SNAKE_CASE =(self.image_size // 32) ** 2
_SCREAMING_SNAKE_CASE =num_patches + 1
def A ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_lowerCamelCase , )
def A ( self : List[Any] , _a : Tuple , _a : Optional[Any] , _a : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ViTHybridModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE =model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Tuple , _a : Tuple , _a : Union[str, Any] , _a : Union[str, Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.type_sequence_label_size
_SCREAMING_SNAKE_CASE =ViTHybridForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE =model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
A__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
def A ( self : Dict ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ViTHybridModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def A ( self : str ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def A ( self : Dict ) -> Any:
'''simple docstring'''
pass
def A ( self : str ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_SCREAMING_SNAKE_CASE =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def A ( self : str ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_lowerCamelCase )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def A ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def A ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def A ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =_config_zero_init(_lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_lowerCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_SCREAMING_SNAKE_CASE =[f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def A ( self : Dict ) -> List[str]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =ViTHybridModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def A ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Any ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_lowerCamelCase )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_lowerCamelCase )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
_SCREAMING_SNAKE_CASE =torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
def A ( self : Tuple ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
_SCREAMING_SNAKE_CASE =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_lowerCamelCase , return_tensors='pt' )
_SCREAMING_SNAKE_CASE =model(**_lowerCamelCase )
_SCREAMING_SNAKE_CASE =outputs.logits
# model predicts one of the 1000 ImageNet classes
_SCREAMING_SNAKE_CASE =logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 405 |
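# --- Illustrative sketch (not part of the test file above) ---
# The slow integration tests reduce to this inference recipe; the checkpoint
# name is the one the tests reference and the image path is a repo fixture.
#
# import torch
# from PIL import Image
# from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor
#
# processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
# model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# inputs = processor(images=image, return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"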
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __A ( unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionLDMaDPipeline
A_ = TEXT_TO_IMAGE_PARAMS
A_ = TEXT_TO_IMAGE_BATCH_PARAMS
A_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case_( self )-> int:
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
lowercase__ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowercase__ = CLIPTextModel(_lowerCamelCase )
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case_( self , _lowerCamelCase , _lowerCamelCase=0 )-> Optional[Any]:
if str(_lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(_lowerCamelCase )
else:
lowercase__ = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
lowercase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case_( self )-> List[str]:
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionLDMaDPipeline(**_lowerCamelCase )
lowercase__ = ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase__ = self.get_dummy_inputs(_lowerCamelCase )
lowercase__ = ldmad_pipe(**_lowerCamelCase )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb[0, -3:, -3:, -1]
lowercase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
lowercase__ = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
lowercase__ = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def snake_case_( self )-> List[str]:
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionLDMaDPipeline(**_lowerCamelCase )
lowercase__ = ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase__ = self.get_dummy_inputs(_lowerCamelCase )
lowercase__ = 3 * [inputs['''prompt''']]
# forward
lowercase__ = ldmad_pipe(**_lowerCamelCase )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb_slice_a[0, -3:, -3:, -1]
lowercase__ = depth_slice_a[0, -3:, -1]
lowercase__ = self.get_dummy_inputs(_lowerCamelCase )
lowercase__ = 3 * [inputs.pop('''prompt''' )]
lowercase__ = ldmad_pipe.tokenizer(
_lowerCamelCase , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors='''pt''' , )
lowercase__ = text_inputs['''input_ids'''].to(_lowerCamelCase )
lowercase__ = ldmad_pipe.text_encoder(_lowerCamelCase )[0]
lowercase__ = prompt_embeds
# forward
lowercase__ = ldmad_pipe(**_lowerCamelCase )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb_slice_a[0, -3:, -3:, -1]
lowercase__ = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def snake_case_( self )-> List[str]:
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=_lowerCamelCase )
lowercase__ = StableDiffusionLDMaDPipeline(**_lowerCamelCase )
lowercase__ = ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase__ = self.get_dummy_inputs(_lowerCamelCase )
lowercase__ = '''french fries'''
lowercase__ = ldmad_pipe(**_lowerCamelCase , negative_prompt=_lowerCamelCase )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb[0, -3:, -3:, -1]
lowercase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
lowercase__ = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
lowercase__ = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def snake_case_( self )-> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_( self , _lowerCamelCase , _lowerCamelCase="cpu" , _lowerCamelCase=torch.floataa , _lowerCamelCase=0 )-> List[Any]:
lowercase__ = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
lowercase__ = np.random.RandomState(_lowerCamelCase ).standard_normal((1, 4, 6_4, 6_4) )
lowercase__ = torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase , dtype=_lowerCamelCase )
lowercase__ = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case_( self )-> Tuple:
lowercase__ = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
lowercase__ = ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase__ = self.get_inputs(_lowerCamelCase )
lowercase__ = ldmad_pipe(**_lowerCamelCase )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = rgb[0, -3:, -3:, -1].flatten()
lowercase__ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
lowercase__ = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
lowercase__ = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def snake_case_( self )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_( self , _lowerCamelCase , _lowerCamelCase="cpu" , _lowerCamelCase=torch.floataa , _lowerCamelCase=0 )-> str:
lowercase__ = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
lowercase__ = np.random.RandomState(_lowerCamelCase ).standard_normal((1, 4, 6_4, 6_4) )
lowercase__ = torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase , dtype=_lowerCamelCase )
lowercase__ = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 5_0,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case_( self )-> Any:
lowercase__ = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase__ = self.get_inputs(_lowerCamelCase )
lowercase__ = ldmad_pipe(**_lowerCamelCase )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = 0.4_9_5_5_8_6
lowercase__ = 0.3_3_7_9_5_5_1_5
lowercase__ = 1_1_2.4_8_5_1_8
lowercase__ = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def snake_case_( self )-> Any:
lowercase__ = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase__ = self.get_inputs(_lowerCamelCase )
lowercase__ = ldmad_pipe(**_lowerCamelCase )
lowercase__ , lowercase__ = output.rgb, output.depth
lowercase__ = 0.4_1_9_4_1_2_7
lowercase__ = 0.3_5_3_7_5_5_8_6
lowercase__ = 0.5_6_3_8_5_0_2
lowercase__ = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 161 | 0 |
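# --- Illustrative sketch (not part of the test file above) ---
# Plain usage of the pipeline the integration tests exercise; "Intel/ldm3d" is
# the checkpoint referenced in the tests.
#
# from diffusers import StableDiffusionLDMaDPipeline
#
# pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
# out = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50, output_type="numpy")
# rgb, depth = out.rgb, out.depth  # paired RGB image and depth map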
'''simple docstring'''
def pancake_sort(arr):
    """Sort ``arr`` using prefix reversals and return it."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, sending the maximum to its final slot
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted)) | 266 |
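# --- Illustrative notes (added; not part of the original snippet) ---
# Each pass uses at most two prefix reversals: one to bring the current maximum
# to the front, one to flip it into its final slot. That is O(n) flips overall,
# with O(n^2) comparisons spent locating the maxima.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([]) == []
assert pancake_sort([-2, -5, -45]) == [-45, -5, -2]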
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) | 266 | 1 |
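# --- Illustrative sketch (hypothetical usage, not part of the test file) ---
# The tokenizer maps (artist, genres, lyrics) metadata to one token sequence
# per prior level, which is exactly what the EXPECTED_OUTPUT tensors encode.
#
# from transformers import JukeboxTokenizer
#
# tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
# tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller ...")["input_ids"]
# print([t.shape for t in tokens])  # one tensor per prior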
"""simple docstring"""
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__A : List[str] = parser.parse_args()
if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"
    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[F'''{prefix}.{param_name}'''] = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = F'''{prefix}.embeddings.{w}.weight'''
            compressed_sd[param_name] = state_dict[param_name]
for w in ["weight", "bias"]:
            param_name = F'''{prefix}.embeddings.LayerNorm.{w}'''
            compressed_sd[param_name] = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
                    compressed_sd[F'''{prefix}.h.{std_idx}.{layer}.{w}'''] = state_dict[
                        F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
                    ]
            compressed_sd[F'''{prefix}.h.{std_idx}.attn.bias'''] = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
                    compressed_sd[F'''{prefix}.encoder.layer.{std_idx}.{layer}.{w}'''] = state_dict[
                        F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
                    ]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
if args.vocab_transform:
for w in ["weight", "bias"]:
                compressed_sd[F'''lm_head.dense.{w}'''] = state_dict[F'''lm_head.dense.{w}''']
                compressed_sd[F'''lm_head.layer_norm.{w}'''] = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
            compressed_sd[F'''{prefix}.ln_f.{w}'''] = state_dict[F'''{prefix}.ln_f.{w}''']
        compressed_sd['''lm_head.weight'''] = state_dict['''lm_head.weight''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 499 |
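# --- Illustrative sketch (hypothetical follow-up, not part of the script) ---
# Loading the extracted layers into a 6-layer student. The config override is
# an assumption (six teacher layers are copied above), and strict=False
# tolerates the keys the extraction intentionally drops.
#
# import torch
# from transformers import RobertaConfig, RobertaForMaskedLM
#
# student_config = RobertaConfig.from_pretrained("roberta-large", num_hidden_layers=6)
# student = RobertaForMaskedLM(student_config)
# student.load_state_dict(torch.load("serialization_dir/tf_roberta_048131723.pth"), strict=False)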
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 499 | 1 |
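# --- Illustrative sketch (added example; not part of the module above) ---
# Typical use of the two combinators through the public `datasets` API.
from datasets import Dataset, concatenate_datasets, interleave_datasets

_d1 = Dataset.from_dict({"x": [0, 1, 2]})
_d2 = Dataset.from_dict({"x": [10, 11, 12]})
_mixed = interleave_datasets([_d1, _d2], probabilities=[0.7, 0.3], seed=42)
_stacked = concatenate_datasets([_d1, _d2])  # 6 rows, _d1 first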
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample

        # The output is always a tensor of ones; the pipeline only exercises the wiring.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result | 665 |
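# --- Illustrative sketch (hypothetical usage, not part of the file above) ---
# Instantiating the toy pipeline with tiny dummy components; every
# hyperparameter below is a placeholder chosen only to keep the model small.
#
# from diffusers import DDPMScheduler, UNet2DModel
#
# unet = UNet2DModel(
#     sample_size=8,
#     in_channels=3,
#     out_channels=3,
#     layers_per_block=1,
#     block_out_channels=(32, 64),
#     down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#     up_block_types=("AttnUpBlock2D", "UpBlock2D"),
# )
# scheduler = DDPMScheduler(num_train_timesteps=10)
# pipe = CustomPipeline(unet=unet, scheduler=scheduler)
# result = pipe()  # tensor of ones with shape (1, 3, 8, 8)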
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") | 665 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["OwlViTFeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE__["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], SCREAMING_SNAKE_CASE__, module_spec=__spec__)
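if __name__ == "__main__":
    # A minimal sketch of the availability checks guarding the optional imports above:
    # probing for an installed package without importing it, which is what helpers like
    # is_torch_available / is_vision_available ultimately rely on. Package names are examples.
    import importlib.util

    for pkg in ("torch", "PIL", "definitely_not_installed"):
        print(pkg, "available:", importlib.util.find_spec(pkg) is not None)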
| 631 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    '''simple docstring'''
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["stage1", "stage2", "stage3", "stage4"] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
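# A quick numeric sketch of the Swin stage widths implied by the configs above —
# stage i has embed_dim * 2**i channels, the same rule read_in_q_k_v relies on below:
#   tiny/small: [96 * 2**i for i in range(4)]  -> [96, 192, 384, 768]
#   base:       [128 * 2**i for i in range(4)] -> [128, 256, 512, 1024]
#   large:      [192 * 2**i for i in range(4)] -> [192, 384, 768, 1536]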
def create_rename_keys(config ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v(state_dict , backbone_config ):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
            in_proj_bias = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
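# A tiny sketch of the fused-QKV split performed above: the first `dim` rows are the
# query projection, the next `dim` the key, and the last `dim` the value. Illustrative only:
#   import torch
#   dim = 2
#   qkv = torch.arange(3 * dim * 4).reshape(3 * dim, 4)  # fused (3*dim, in) matrix
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v]), qkv)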
def correct_unfold_reduction_order(x ):
    '''simple docstring'''
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order(x ):
    '''simple docstring'''
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order(x ):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order(x ):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
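# Sanity sketch: [0, 2, 1, 3] swaps positions 1 and 2, so it is its own inverse, and the
# reverse_* helpers above undo their correct_* counterparts. Illustrative check:
#   import torch
#   x = torch.arange(8.0).reshape(2, 4)  # (out_channel, in_channel) with in_channel = 4
#   assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)
#   v = torch.arange(4.0)
#   assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(v)), v)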
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    model_name_to_url = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" , file_name=model_name )[
        "state_dict"
    ]
for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("bn" , "batch_norm" )
        state_dict[key] = val
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
# verify on image
_UpperCamelCase = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
_UpperCamelCase = Image.open(requests.get(a__ , stream=a__ ).raw ).convert("RGB" )
_UpperCamelCase = SegformerImageProcessor()
_UpperCamelCase = processor(a__ , return_tensors="pt" ).pixel_values
with torch.no_grad():
_UpperCamelCase = model(a__ )
_UpperCamelCase = outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
print(f'Saving processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase__ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
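# Example invocation (the script filename is illustrative):
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub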
| 701 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_small_model_pt(self):
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        """simple docstring"""
        pipe = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
        # convert model to fp16
        pipe.model.half()
        outputs = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(outputs , list)
@slow
@require_torch
    def test_large_model_pt(self):
        """simple docstring"""
        unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        """simple docstring"""
        unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self , unmasker):
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        """simple docstring"""
        unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [])
@require_tf
    def test_model_no_pad_tf(self):
        """simple docstring"""
        unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [])
    def get_test_pipeline(self , model , tokenizer , processor):
        """simple docstring"""
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer)
        examples = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
    def run_pipeline_test(self , fill_masker , examples):
        """simple docstring"""
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f'This is a {tokenizer.mask_token}' , )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ] , )
        outputs = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
            outputs , [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ] , )
        outputs = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
            outputs , [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
] , )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
    def run_test_targets(self , model , tokenizer):
        """simple docstring"""
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
# Pipeline argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets)
        outputs = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(processed_targets))
# Call argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer)
        outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets=targets)
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(processed_targets))
# Score equivalence
        outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f'This is a {tokenizer.mask_token}' , targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores) , nested_simplify(target_scores))
# Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
    def run_test_top_k(self , model , tokenizer):
        """simple docstring"""
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2)
        outputs = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ] , )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer)
        outputs2 = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
        self.assertEqual(
            outputs2 , [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ] , )
        self.assertEqual(nested_simplify(outputs) , nested_simplify(outputs2))
    def run_test_top_k_targets(self , model , tokenizer):
        """simple docstring"""
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=targets)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs , key=lambda x: x["score"] , reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs) , nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self , model , tokenizer):
        """simple docstring"""
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f'My name is {tokenizer.mask_token}' , targets=targets , top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs) , 3)
    def fill_mask_with_multiple_masks(self , model , tokenizer):
        """simple docstring"""
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer)
        outputs = fill_masker(
            f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
        self.assertEqual(
            outputs , [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
] , )
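# A compact sketch of the output schema these tests assert: each prediction is a dict
# with `sequence`, `score`, `token`, and `token_str`; with several mask tokens in the
# input, the pipeline returns one such list per mask. Illustrative usage:
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=2)
#   # [{'sequence': ..., 'score': ..., 'token': ..., 'token_str': ...}, ...]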
| 82 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_snake_case = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
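# Illustrative only: element-wise equality averaged over the batch, e.g.
#   import numpy as np
#   simple_accuracy(np.array([0, 1, 2, 1]), np.array([0, 1, 1, 1]))  # -> 0.75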
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , __magic_name__ )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_master():
with open(__magic_name__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , __magic_name__ , __magic_name__ )
writer.write("%s = %s\n" % (key, value) )
results.update(__magic_name__ )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
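# Example invocation (paths and task name are illustrative; the flags come from the
# dataclasses above plus the standard TrainingArguments):
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir ./data/swag \
#       --output_dir ./out \
#       --do_train --do_eval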
| 655 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase ):
    def setUp(self ):
        '''simple docstring'''
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"
    def _setup_pt_ckpt(self , model_path ):
        '''simple docstring'''
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt(self , model_path ):
        '''simple docstring'''
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
    def test_framework_provided(self ):
        '''simple docstring'''
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided(self ):
        '''simple docstring'''
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment(self ):
        '''simple docstring'''
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_tf_available" , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_torch_available" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch("transformers.onnx.features.is_tf_available" , mock_tf_available ), patch(
            "transformers.onnx.features.is_torch_available" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_tf_available" , mock_tf_available ), patch(
            "transformers.onnx.features.is_torch_available" , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
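# Direct usage sketch of the helper exercised above (the model id is illustrative):
#   from transformers.onnx import FeaturesManager
#   framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
#   print(framework)  # "pt" or "tf", depending on what is installed / saved locally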
| 655 | 1 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''FlavaImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: Optional[ImageInput] = None , text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = False , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_image_mask: Optional[bool] = None , return_codebook_pixels: Optional[bool] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
@property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
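# Usage sketch (the checkpoint id and file name are illustrative of the typical pattern):
#   from transformers import FlavaProcessor
#   from PIL import Image
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=Image.open("cat.png"), text=["a photo of a cat"], return_tensors="pt")
#   # `inputs` holds both tokenizer fields (input_ids, ...) and image fields (pixel_values, ...)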
| 719 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n    title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n    and Jeffrey Dean},\n    year={2016},\n    eprint={1609.08144},\n    archivePrefix={arXiv},\n    primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...     \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...     \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...     \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...     \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...     \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...     \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n    >>> print(round(results["google_bleu"], 2))\n    0.44\n\n    Example 2:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...     \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...     \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...     \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...     \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n    >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n    ...     \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n    ...     \'heed\', \'the\', \'cat\', \'commands\']\n    >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n    ...     \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n    ...     \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...     \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...     \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n    >>> print(round(results["google_bleu"], 2))\n    0.61\n\n    Example 3:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...     \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...     \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...     \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...     \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n    >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n    ...     \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n    ...     \'heed\', \'the\', \'cat\', \'commands\']\n    >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n    ...     \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n    ...     \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...     \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...     \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n    >>> print(round(results["google_bleu"], 2))\n    0.53\n\n    Example 4:\n    >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n    ...     \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n    ...     \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n    >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n    ...     \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n    ...     \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n    >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n    ...     \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n    ...     \'heed\', \'the\', \'cat\', \'commands\']\n    >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n    ...     \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n    ...     \'of\', \'the\', \'cat\']\n\n    >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n    ...     \'interested\', \'in\', \'world\', \'history\']\n    >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n    ...     \'because\', \'he\', \'read\', \'the\', \'book\']\n\n    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n    >>> hypotheses = [hyp1, hyp2]\n    >>> google_bleu = datasets.load_metric("google_bleu")\n    >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n    >>> print(round(results["google_bleu"], 2))\n    0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(self , predictions: List[List[str]] , references: List[List[List[str]]] , min_len: int = 1 , max_len: int = 4 , ):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
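# Quick check against the first docstring example above: with those two hypotheses and
# single references, corpus-level GLEU comes out to ~0.44. Illustrative direct call:
#   from nltk.translate import gleu_score
#   round(gleu_score.corpus_gleu(list_of_references=list_of_references, hypotheses=hypotheses), 2)  # 0.44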
| 524 | 0 |
from math import pi
def arc_length(angle , radius):
    return 2 * pi * radius * (angle / 360)
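# Worked example matching the call below: arc_length(90, 10) = 2 * pi * 10 * (90 / 360)
# = 5 * pi ≈ 15.708, i.e. a quarter of the full circumference 2 * pi * 10 ≈ 62.83.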
if __name__ == "__main__":
print(arc_length(90, 10))
| 73 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name , num_frames ):
    """simple docstring"""
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find('''patch''' )
    patch_size = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3_072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1_024
        vision_config.intermediate_size = 4_096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3_072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
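# Illustrative patch-size derivation used above:
#   name = "xclip-base-patch32"
#   i = name.find("patch")                               # 11
#   int(name[i + len("patch") : i + len("patch") + 2])   # -> 32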
def rename_key( name ):
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
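

# Illustrative check (not part of the original script): rename_key should send
# an OpenAI-CLIP-style parameter name to its Transformers-style counterpart.
def _demo_rename_key():
    assert rename_key("transformer.resblocks.0.ln_1.weight") == "text_model.encoder.layers.0.layer_norm1.weight"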
def convert_state_dict(orig_state_dict, config):
    """Rename keys and split fused q/k/v projections in the original state dict."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
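

# Minimal sketch of the q/k/v split performed above: a fused `in_proj` weight
# of shape (3 * dim, dim) is assumed to be laid out as [q; k; v] along dim 0,
# so the three slices concatenate back to the original tensor.
def _demo_split_in_proj(dim: int = 4):
    fused = torch.randn(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)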
def prepare_video(num_frames):
    """Load a short "eating spaghetti" test clip with the requested number of frames."""
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
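

# Hedged smoke test (requires network access to the dataset repo referenced
# above): prepare_video should return `num_frames` frames as numpy arrays.
def _demo_prepare_video(num_frames: int = 8):
    frames = prepare_video(num_frames)
    assert len(frames) == num_frames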
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original X-CLIP checkpoint weights into the Transformers design."""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 74 | 0 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    """Count perimeters up to `limit` that are formed by exactly one integer
    right triangle, generating primitive triples with Euclid's formula."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
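

# Worked example (illustrative): Euclid's formula a = m^2 - n^2, b = 2mn,
# c = m^2 + n^2 with m=2, n=1 yields the primitive triple (3, 4, 5), whose
# perimeter equals the 2*m*(m+n) expression used by solution() above.
def _demo_euclid_formula():
    euclid_m, euclid_n = 2, 1
    a = euclid_m**2 - euclid_n**2
    b = 2 * euclid_m * euclid_n
    c = euclid_m**2 + euclid_n**2
    assert (a, b, c) == (3, 4, 5)
    assert a + b + c == 2 * euclid_m * (euclid_m + euclid_n)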
if __name__ == "__main__":
print(f"""{solution() = }""")
| 714 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer with NMT/NFKC normalization,
    whitespace collapsing and lower-casing."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: str = "<unk>",
        eos_token: str = "</s>",
        pad_token: str = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)
    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model on one or more text files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()
    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[List[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model on an iterator of text (or of batches of text)."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()
    def add_unk_id(self):
        """Record the <unk> token id in the serialized Unigram model."""
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
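

# Hedged usage sketch (the corpus path is a hypothetical placeholder): train
# the Unigram tokenizer on plain text, then persist it in tokenizers' JSON format.
def _demo_train_tokenizer():
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train("corpus.txt", vocab_size=8000)  # "corpus.txt" is a placeholder file
    tokenizer.save("unigram-tokenizer.json")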
| 112 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A vector over the reals, backed by a list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")
    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Return a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index `pos`."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y (the classic BLAS axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a random vector of size n with integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
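

# Illustrative usage of the Vector class above: the dot product via __mul__,
# and the right angle between two orthogonal vectors.
def _demo_vector_usage():
    x = Vector([1, 0])
    y = Vector([0, 2])
    assert x * y == 0
    assert abs(x.angle(y) - math.pi / 2) < 1e-9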
class Matrix:
    """A matrix with a height, a width, and list-of-lists entries."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")
    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Return an n-by-n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
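

# Illustrative usage of the Matrix class above: the determinant of a 2x2
# matrix via the closed-form base case of determinant().
def _demo_matrix_determinant():
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    assert m.determinant() == 1 * 4 - 2 * 3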
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a random matrix with integer entries in [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 44 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape

        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , lowerCAmelCase_ , lowerCAmelCase_ )
writer.write("%s = %s\n" % (key, value) )
results.update(lowerCAmelCase_ )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , lowerCAmelCase_ , lowerCAmelCase_ )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
lowerCAmelCase__ = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
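

# Illustrative sketch of the label alignment performed in align_predictions()
# inside main(): argmax over the logits, then drop positions whose gold id
# equals CrossEntropyLoss's ignore_index (-100).
def _demo_align_predictions():
    label_map = {0: "O", 1: "B-PER"}
    predictions = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])
    label_ids = np.array([[0, 1, nn.CrossEntropyLoss().ignore_index]])
    preds = np.argmax(predictions, axis=2)
    aligned = [
        label_map[preds[0][j]]
        for j in range(preds.shape[1])
        if label_ids[0, j] != nn.CrossEntropyLoss().ignore_index
    ]
    assert aligned == ["O", "B-PER"]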
| 61 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """Configuration for a CANINE model (character-level, tokenization-free)."""

    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
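

# Hedged usage sketch: instantiate the config with its defaults; CANINE is
# character-level, so there is no vocab_size to set here.
def _demo_canine_config():
    config = CanineConfig()
    assert config.model_type == "canine"
    assert config.downsampling_rate == 4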
| 608 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BigBird."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , __UpperCamelCase , __UpperCamelCase="<unk>" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<pad>" , __UpperCamelCase="[SEP]" , __UpperCamelCase="[MASK]" , __UpperCamelCase="[CLS]" , __UpperCamelCase = None , **__UpperCamelCase , ):
A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
A_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sep_token=__UpperCamelCase , mask_token=__UpperCamelCase , cls_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
A_ = vocab_file
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with the SentencePiece model."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build BigBird inputs: `[CLS] X [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
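

# Illustrative layout check for the special-token methods above: a sequence
# pair is wrapped as [CLS] A [SEP] B [SEP]; toy ids cls=1 and sep=2 are used.
def _demo_pair_layout():
    cls, sep = [1], [2]
    token_ids_0, token_ids_1 = [10, 11], [20]
    assert cls + token_ids_0 + sep + token_ids_1 + sep == [1, 10, 11, 2, 20, 2]
    # token type ids: zeros over [CLS] A [SEP], ones over B [SEP]
    assert len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] == [0, 0, 0, 0, 1, 1]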
| 608 | 1 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # Collect every id that decodes on its own; single bytes might not be
        # valid utf-8 (byte 128 for instance), so decoding errors are skipped.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
                toks.append((i, tok))
            except UnicodeDecodeError:
                pass
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters , skip_special_tokens=False )

                for attr in attributes_list:
                    setattr(tokenizer , attr + "_id" , None )
                    self.assertEqual(getattr(tokenizer , attr ) , None )
                    self.assertEqual(getattr(tokenizer , attr + "_id" ) , None )

                    setattr(tokenizer , attr + "_id" , token_id_to_test_setters )
                    self.assertEqual(getattr(tokenizer , attr ) , token_to_test_setters )
                    self.assertEqual(getattr(tokenizer , attr + "_id" ) , token_id_to_test_setters )

                setattr(tokenizer , "additional_special_tokens_ids" , [] )
                self.assertListEqual(getattr(tokenizer , "additional_special_tokens" ) , [] )
                self.assertListEqual(getattr(tokenizer , "additional_special_tokens_ids" ) , [] )

                setattr(tokenizer , "additional_special_tokens_ids" , [token_id_to_test_setters] )
                self.assertListEqual(getattr(tokenizer , "additional_special_tokens" ) , [token_to_test_setters] )
                self.assertListEqual(getattr(tokenizer , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
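
# Illustrative sketch (not from the test suite above) of the config-file override the first test
# exercises: edit "additional_special_tokens" in a saved tokenizer_config.json and reload. The
# checkpoint name and token are placeholders.
#
#     import json, tempfile
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("t5-small")
#     with tempfile.TemporaryDirectory() as tmp_dir:
#         tokenizer.save_pretrained(tmp_dir)
#         with open(f"{tmp_dir}/tokenizer_config.json") as f:
#             cfg = json.load(f)
#         cfg["additional_special_tokens"] = ["<my_token>"]
#         with open(f"{tmp_dir}/tokenizer_config.json", "w") as f:
#             json.dump(cfg, f)
#         reloaded = AutoTokenizer.from_pretrained(tmp_dir)
#         assert "<my_token>" in reloaded.additional_special_tokens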
| 371 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"""{key}.dat""")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
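        # The index written above is a JSON mapping from parameter name to tensor metadata, e.g.
        # (illustrative): {"linear1.weight": {"dtype": "float32", "shape": [4, 3]}, ...}; the
        # tensor bytes themselves live in the matching `<key>.dat` files.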
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, 'weight', tmp_dir, {})
                weight_file = os.path.join(tmp_dir, 'weight.dat')
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {'weight': {'shape': [2, 3], 'dtype': str(dtype).split('.')[1]}})

                new_weight = load_offloaded_weight(weight_file, index['weight'])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        disk_part = {k: v for k, v in state_dict.items() if 'linear2' in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if 'weight' in k}
        disk_part = {k: v for k, v in state_dict.items() if 'weight' not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'])
        self.assertDictEqual(extracted, {'a.1': 0, 'a.2': 2})

        state_dict = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'])
        self.assertDictEqual(extracted, {'a.1.a': 0, 'a.2.a': 2})
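

# Minimal usage sketch of the utilities tested above (illustrative, not part of the original
# test file): offload a model's weights to disk, then read them back lazily via the loader.
if __name__ == "__main__":
    model = ModelForTest()
    with TemporaryDirectory() as tmp_dir:
        offload_state_dict(tmp_dir, model.state_dict())
        # an empty in-memory state dict means every key is served from the offload folder
        weight_map = OffloadedWeightsLoader(state_dict={}, save_folder=tmp_dir)
        print(sorted(weight_map))                  # all parameter names
        print(weight_map["linear1.weight"].shape)  # loaded from disk on access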
| 430 | 0 |
'''simple docstring'''
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = """philschmid/bart-large-cnn-samsum"""
    description = (
        """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
        """and returns a summary of the text."""
    )
    name = """summarizer"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["""text"""]
    outputs = ["""text"""]

    def encode(self, text):
        return self.pre_processor(text, return_tensors='''pt''', truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
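
# Illustrative usage (not part of the module); downloading the default checkpoint is assumed:
#
#     tool = TextSummarizationTool()
#     summary = tool("Long English text to condense ...")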
| 721 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
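        # Worked example (illustrative): with seq_length=7 and attention_window=4, the padding
        # term is (4 - 7 % 4) % 4 = 1, so encoder_seq_length = 7 + 1 = 8, the next multiple of
        # the attention window.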
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
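
# Illustrative sketch (not in the original test file): prepare_led_inputs_dict fills in
# padding-based masks when none are given; pad_token_id=1 is assumed here.
#
#     cfg = LEDConfig(pad_token_id=1)
#     inputs = prepare_led_inputs_dict(cfg, tf.constant([[5, 6, 1]]), tf.constant([[2, 5, 1]]))
#     inputs["attention_mask"]  # [[1, 1, 0]]: pad positions are masked out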
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict['''attention_mask'''])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict['''global_attention_mask'''], )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def test_xla_generate_fast(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head-masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''').led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''')

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-3, rtol=1E-3)
| 340 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
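
# Illustrative usage sketch (not part of the test class); the checkpoint name is assumed:
#
#     from transformers import AlignProcessor
#
#     processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#     batch = processor(text=["a photo of a cat"], images=[image], return_tensors="pt")
#     sorted(batch.keys())  # attention_mask, input_ids, pixel_values, token_type_ids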
| 289 |
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()

results = {}
# fmt: off
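# Each tensor below is an expected output slice for one checkpoint; in the original script they
# populate `results`, keyed by the model id with "/" and "-" replaced by "_" (see the assert in
# the loop at the bottom of this file).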
_lowerCAmelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCAmelCase : Any = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCAmelCase : List[str] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCAmelCase : Optional[int] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCAmelCase : List[Any] = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCAmelCase : Optional[int] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCAmelCase : Optional[Any] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCAmelCase : List[str] = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCAmelCase : Any = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCAmelCase : int = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCAmelCase : Union[str, Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCAmelCase : str = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCAmelCase : Optional[int] = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCAmelCase : Union[str, Any] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCAmelCase : Any = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
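
# Illustrative check (not in the original script) of how the lookup key above is derived from a
# hub model id:
#
#     model_id = "google/ddpm-cifar10-32"
#     key = "_".join("_".join(model_id.split("/")).split("-"))
#     assert key == "google_ddpm_cifar10_32"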
| 289 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = '''levit'''

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
| 718 |
def __get_demo_graph(index):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    """Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)]; ai <= bi."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
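
# Worked example (illustrative): in the first demo graph the triangle 0-1-2 and the cycle
# 5-6-7-8 contain no bridges, while removing (3, 4), (2, 3) or (2, 5) disconnects the graph:
#
#     >>> compute_bridges(__get_demo_graph(0))
#     [(3, 4), (2, 3), (2, 5)]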
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs, ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("""path_or_buf""", None)
        orient = self.to_json_kwargs.pop("""orient""", """records""")
        lines = self.to_json_kwargs.pop("""lines""", True if orient == """records""" else False)
        index = self.to_json_kwargs.pop("""index""", False if orient in ["""split""", """table"""] else True)
        compression = self.to_json_kwargs.pop("""compression""", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, """wb""", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    """ was passed. Please provide a local path instead.""")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("""\n"""):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs, ) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="""ba""", disable=not logging.is_progress_bar_enabled(), desc="""Creating json from Arrow format""", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="""ba""", disable=not logging.is_progress_bar_enabled(), desc="""Creating json from Arrow format""", ):
                    written += file_obj.write(json_str)
        return written
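
# Illustrative usage sketch (not part of the module): round-trip a small dataset through the
# writer and reader defined above; the output path is a placeholder.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     JsonDatasetWriter(ds, "out.jsonl", batch_size=2).write()  # one JSON object per line
#     reloaded = JsonDatasetReader("out.jsonl").read()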
| 423 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase : int = logging.get_logger(__name__)
lowercase : int = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
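
# Illustrative sketch (not part of the module): the backbone fields set above let callers pick
# which stages a backbone exposes as feature maps.
#
#     config = ConvNextV2Config(out_features=["stage1", "stage4"])
#     config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     config.out_features  # ['stage1', 'stage4']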
| 423 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['''gpt2''']
TINY_MODEL_CHECKPOINT = '''gpt2'''
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="""text"""),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["""input_ids"""].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["""logits"""]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_tokenizer_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="""tf""")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / """saved.model"""
                tf.saved_model.save(model, save_path, signatures={"""serving_default""": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["""serving_default"""](test_inputs)["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["""input_ids"""].numpy().shape[1]

                assert out_length == max_length
| 686 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(lowercase )
| 686 | 1
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluates the functional correctness of a completion by running the test suite in a subprocess."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    """Raised when a guarded program exceeds its time limit."""

    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns False so that this stream can never be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). WARNING: this is NOT a security sandbox; untrusted
    code should still be executed inside a dedicated container.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
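# Usage sketch (editor's addition, not part of the original file): scoring a
# single completion. `program` would normally be prompt + completion + tests.
#
#   program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
#   print(check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0))
#   # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}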
| 584 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
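# Note (editor's addition): both tests are gated behind @slow, so in the
# transformers test suite they only run with the slow-test flag set, e.g.
#   RUN_SLOW=1 pytest tests/models/xlm_roberta/test_modeling_xlm_roberta.py
# (test path assumed from the upstream repo layout).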
| 584 | 1 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
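# Sanity check (editor's addition): all three variants agree on a positive and
# a negative case.
#   >>> is_pangram(), is_pangram_faster(), is_pangram_fastest()
#   (True, True, True)
#   >>> is_pangram("hello world"), is_pangram_faster("hello world"), is_pangram_fastest("hello world")
#   (False, False, False)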
def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 711 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
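# Worked example (editor's addition): with random_slice == 3,
# crossover("banana", "cherry") returns ("banrry", "cheana") -- each child
# keeps one parent's prefix and the other parent's suffix.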
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate a new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match with `target` is found."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 557 | 0 |
"""simple docstring"""
def __get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small demo adjacency lists."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)]; ai <= bi."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
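# Example (editor's addition): on demo graph 0 above, the bridges are exactly
# the edges whose removal disconnects the graph.
#   >>> compute_bridges(__get_demo_graph(0))
#   [(3, 4), (2, 3), (2, 5)]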
if __name__ == "__main__":
import doctest
doctest.testmod()
| 532 |
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters in a keyword (keeping spaces)."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    """Return a cipher map given a keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Enciphers a message given a cipher map."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Deciphers a message given a cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
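# Round-trip example (editor's addition; output values assumed from the
# upstream doctests):
#   >>> cipher_map = create_cipher_map("Goodbye!!")
#   >>> encipher("Hello World!!", cipher_map)
#   'CYJJM VMQJB!!'
#   >>> decipher("CYJJM VMQJB!!", cipher_map)
#   'HELLO WORLD!!'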
def main() -> None:
    """Handle I/O."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 532 | 1 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"  # env-var name assumed from the upstream accelerate test script
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
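# Note (editor's sketch, not in the original script): `gather_for_metrics` is
# the key call above -- on N processes it gathers the per-process tensors and
# drops the samples that were duplicated to pad the last batch, so `logits`
# ends up with exactly `len(dataloader.dataset)` rows. Single-process usage:
#
#   accelerator = Accelerator()
#   model, ddp_model, dataloader = get_basic_setup(accelerator)
#   logits, targs = generate_predictions(ddp_model, dataloader, accelerator)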
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 266 |
'''simple docstring'''
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1,
        activation: Optional[str] = "relu", **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID",
            groups=groups, use_bias=False, name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
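# Illustrative usage (editor's addition): a TFRegNetConvLayer maps an NHWC
# feature map to `out_channels` channels with conv -> batchnorm -> activation.
#
#   layer = TFRegNetConvLayer(out_channels=32, kernel_size=3, stride=2)
#   y = layer(tf.random.normal((1, 224, 224, 3)))   # -> shape (1, 112, 112, 32)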
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2,
            activation=config.hidden_act, name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None, training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None, training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states,
            return_dict=return_dict, training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self, pixel_values: tf.Tensor = None, labels: tf.Tensor = None,
        output_hidden_states: bool = None, return_dict: bool = None, training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 266 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
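# Usage note (editor's addition): thanks to the _LazyModule registration above,
# `from transformers.models.convnext import ConvNextModel` only imports the
# heavy torch-backed module on first attribute access, keeping import cheap.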
| 199 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
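# Editor's note (assumption about context): ImageGPT quantizes each normalized
# RGB pixel to the index of its nearest cluster, so with the 2 clusters above
# every pixel maps to token id 0 or 1, roughly:
#   dists = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
#   input_ids = dists.argmin(-1)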
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 199 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """
        Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
        """
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False
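# Usage sketch (editor's addition; mirrors the transformers docs):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#   streamer = TextStreamer(tok)
#   model.generate(**inputs, streamer=streamer, max_new_tokens=20)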
class TextIteratorStreamer(TextStreamer):
    """
    Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator.
    """

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
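# Usage sketch (editor's addition; mirrors the transformers docs): consume the
# stream from another thread while generation runs.
#
#   from threading import Thread
#   streamer = TextIteratorStreamer(tok)
#   generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
#   Thread(target=model.generate, kwargs=generation_kwargs).start()
#   generated_text = ""
#   for new_text in streamer:
#       generated_text += new_text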
| 709 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0",
        features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(),
        task_templates=[], builder_name="builder", config_name="config", version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337,
        post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict: DatasetInfosDict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 354 | 0 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
SCREAMING_SNAKE_CASE__ = tau * frequency / samplerate
SCREAMING_SNAKE_CASE__ = sin(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = cos(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE__ = (1 - _cos) / 2
SCREAMING_SNAKE_CASE__ = 1 - _cos
SCREAMING_SNAKE_CASE__ = 1 + alpha
SCREAMING_SNAKE_CASE__ = -2 * _cos
SCREAMING_SNAKE_CASE__ = 1 - alpha
SCREAMING_SNAKE_CASE__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
SCREAMING_SNAKE_CASE__ = tau * frequency / samplerate
SCREAMING_SNAKE_CASE__ = sin(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = cos(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE__ = (1 + _cos) / 2
SCREAMING_SNAKE_CASE__ = -1 - _cos
SCREAMING_SNAKE_CASE__ = 1 + alpha
SCREAMING_SNAKE_CASE__ = -2 * _cos
SCREAMING_SNAKE_CASE__ = 1 - alpha
SCREAMING_SNAKE_CASE__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
SCREAMING_SNAKE_CASE__ = tau * frequency / samplerate
SCREAMING_SNAKE_CASE__ = sin(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = cos(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE__ = _sin / 2
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = -ba
SCREAMING_SNAKE_CASE__ = 1 + alpha
SCREAMING_SNAKE_CASE__ = -2 * _cos
SCREAMING_SNAKE_CASE__ = 1 - alpha
SCREAMING_SNAKE_CASE__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 / sqrt(2 ) ) -> IIRFilter:
SCREAMING_SNAKE_CASE__ = tau * frequency / samplerate
SCREAMING_SNAKE_CASE__ = sin(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = cos(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE__ = 1 - alpha
SCREAMING_SNAKE_CASE__ = -2 * _cos
SCREAMING_SNAKE_CASE__ = 1 + alpha
SCREAMING_SNAKE_CASE__ = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
SCREAMING_SNAKE_CASE__ = tau * frequency / samplerate
SCREAMING_SNAKE_CASE__ = sin(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = cos(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE__ = 10 ** (gain_db / 40)
SCREAMING_SNAKE_CASE__ = 1 + alpha * big_a
SCREAMING_SNAKE_CASE__ = -2 * _cos
SCREAMING_SNAKE_CASE__ = 1 - alpha * big_a
SCREAMING_SNAKE_CASE__ = 1 + alpha / big_a
SCREAMING_SNAKE_CASE__ = -2 * _cos
SCREAMING_SNAKE_CASE__ = 1 - alpha / big_a
SCREAMING_SNAKE_CASE__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
SCREAMING_SNAKE_CASE__ = tau * frequency / samplerate
SCREAMING_SNAKE_CASE__ = sin(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = cos(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE__ = 10 ** (gain_db / 40)
SCREAMING_SNAKE_CASE__ = (big_a + 1) - (big_a - 1) * _cos
SCREAMING_SNAKE_CASE__ = (big_a + 1) + (big_a - 1) * _cos
SCREAMING_SNAKE_CASE__ = (big_a - 1) - (big_a + 1) * _cos
SCREAMING_SNAKE_CASE__ = (big_a - 1) + (big_a + 1) * _cos
SCREAMING_SNAKE_CASE__ = 2 * sqrt(__UpperCAmelCase ) * alpha
SCREAMING_SNAKE_CASE__ = big_a * (pmc + aaa)
SCREAMING_SNAKE_CASE__ = 2 * big_a * mpc
SCREAMING_SNAKE_CASE__ = big_a * (pmc - aaa)
SCREAMING_SNAKE_CASE__ = ppmc + aaa
SCREAMING_SNAKE_CASE__ = -2 * pmpc
SCREAMING_SNAKE_CASE__ = ppmc - aaa
SCREAMING_SNAKE_CASE__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
SCREAMING_SNAKE_CASE__ = tau * frequency / samplerate
SCREAMING_SNAKE_CASE__ = sin(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = cos(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE__ = 10 ** (gain_db / 40)
SCREAMING_SNAKE_CASE__ = (big_a + 1) - (big_a - 1) * _cos
SCREAMING_SNAKE_CASE__ = (big_a + 1) + (big_a - 1) * _cos
SCREAMING_SNAKE_CASE__ = (big_a - 1) - (big_a + 1) * _cos
SCREAMING_SNAKE_CASE__ = (big_a - 1) + (big_a + 1) * _cos
SCREAMING_SNAKE_CASE__ = 2 * sqrt(__UpperCAmelCase ) * alpha
SCREAMING_SNAKE_CASE__ = big_a * (ppmc + aaa)
SCREAMING_SNAKE_CASE__ = -2 * big_a * pmpc
SCREAMING_SNAKE_CASE__ = big_a * (ppmc - aaa)
SCREAMING_SNAKE_CASE__ = pmc + aaa
SCREAMING_SNAKE_CASE__ = 2 * mpc
SCREAMING_SNAKE_CASE__ = pmc - aaa
SCREAMING_SNAKE_CASE__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 159 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 159 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
"""simple docstring"""
def __init__( self : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : Union[str, Any]=30 , lowerCamelCase__ : int=2 , lowerCamelCase__ : str=3 , lowerCamelCase__ : Any=True , lowerCamelCase__ : str=True , lowerCamelCase__ : Any=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[str]=4 , lowerCamelCase__ : List[str]=37 , lowerCamelCase__ : Optional[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : List[str]=10 , lowerCamelCase__ : int=0.0_2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=0.6 , lowerCamelCase__ : str=None , ) -> str:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = mask_ratio
__lowercase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase_ ( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = ViTMAEModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int ) -> str:
"""simple docstring"""
__lowercase = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ )
__lowercase = (self.image_size // self.patch_size) ** 2
__lowercase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__lowercase = 1
__lowercase = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(lowerCamelCase__ )
__lowercase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
UpperCamelCase_ : Optional[int] = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : Any = False
def UpperCAmelCase_ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = ViTMAEModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def UpperCAmelCase_ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Any ) -> str:
"""simple docstring"""
np.random.seed(2 )
__lowercase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__lowercase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowercase = torch.from_numpy(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__lowercase = pt_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowercase = outputs[0].cpu().numpy()
__lowercase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
__lowercase = model_class.from_pretrained(lowerCamelCase__ )
model.to(lowerCamelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
# Make sure we don't have nans
__lowercase = after_outputs[0].cpu().numpy()
__lowercase = 0
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
"""simple docstring"""
pass
@slow
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = ViTMAEModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _A( ) -> List[str]:
'''simple docstring'''
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
__lowercase = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(lowerCamelCase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__lowercase = ViTMAEConfig()
__lowercase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__lowercase = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase__ , noise=torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ ) )
# verify the logits
__lowercase = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__lowercase = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase__ ) , atol=1e-4 ) )
| 705 |
import itertools
import string
from collections.abc import Generator, Iterable
def _A( UpperCamelCase__ : Iterable[str] , UpperCamelCase__ : int ) -> Generator[tuple[str, ...], None, None]:
'''simple docstring'''
__lowercase = iter(UpperCamelCase__ )
while True:
__lowercase = tuple(itertools.islice(UpperCamelCase__ , UpperCamelCase__ ) )
if not chunk:
return
yield chunk
def _A( UpperCamelCase__ : str ) -> str:
'''simple docstring'''
__lowercase = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
__lowercase = ''''''
if len(UpperCamelCase__ ) < 2:
return dirty
for i in range(len(UpperCamelCase__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(UpperCamelCase__ ) & 1:
clean += "X"
return clean
def _A( UpperCamelCase__ : str ) -> list[str]:
'''simple docstring'''
__lowercase = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
__lowercase = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCamelCase__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCamelCase__ )
return table
def _A( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> str:
'''simple docstring'''
__lowercase = generate_table(UpperCamelCase__ )
__lowercase = prepare_input(UpperCamelCase__ )
__lowercase = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
__lowercase , __lowercase = divmod(table.index(UpperCamelCase__ ) , 5 )
__lowercase , __lowercase = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _A( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> str:
'''simple docstring'''
__lowercase = generate_table(UpperCamelCase__ )
__lowercase = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
__lowercase , __lowercase = divmod(table.index(UpperCamelCase__ ) , 5 )
__lowercase , __lowercase = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 362 | 0 |
from __future__ import annotations
snake_case : Optional[int] = [True] * 1_000_001
snake_case : Tuple = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
snake_case : List[Any] = False
i += 1
def lowerCAmelCase_ ( _snake_case : int ) -> bool:
'''simple docstring'''
return seive[n]
def lowerCAmelCase_ ( _snake_case : int ) -> bool:
'''simple docstring'''
return any(digit in "02468" for digit in str(_snake_case ) )
def lowerCAmelCase_ ( _snake_case : int = 1000000 ) -> list[int]:
'''simple docstring'''
__magic_name__ : Any = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(_snake_case ) and not contains_an_even_digit(_snake_case ):
__magic_name__ : Tuple = str(_snake_case )
__magic_name__ : List[Any] = [int(str_num[j:] + str_num[:j] ) for j in range(len(_snake_case ) )]
if all(is_prime(_snake_case ) for i in list_nums ):
result.append(_snake_case )
return result
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"{len(find_circular_primes()) = }")
| 124 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( snake_case , unittest.TestCase ):
UpperCamelCase__ = DiTPipeline
UpperCamelCase__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
UpperCamelCase__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
__magic_name__ : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_a , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=_a , )
__magic_name__ : int = AutoencoderKL()
__magic_name__ : str = DDIMScheduler()
__magic_name__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def SCREAMING_SNAKE_CASE ( self , _a , _a=0 ):
if str(_a ).startswith("mps" ):
__magic_name__ : str = torch.manual_seed(_a )
else:
__magic_name__ : Optional[Any] = torch.Generator(device=_a ).manual_seed(_a )
__magic_name__ : Dict = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = "cpu"
__magic_name__ : Optional[int] = self.get_dummy_components()
__magic_name__ : Any = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
__magic_name__ : Tuple = self.get_dummy_inputs(_a )
__magic_name__ : Any = pipe(**_a ).images
__magic_name__ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__magic_name__ : List[Any] = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
__magic_name__ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_a , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def SCREAMING_SNAKE_CASE ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
__magic_name__ : Optional[int] = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
__magic_name__ : int = ["vase", "umbrella", "white shark", "white wolf"]
__magic_name__ : str = pipe.get_label_ids(_a )
__magic_name__ : Dict = pipe(_a , generator=_a , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(_a , _a ):
__magic_name__ : int = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
__magic_name__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
__magic_name__ : List[str] = ["vase", "umbrella"]
__magic_name__ : Any = pipe.get_label_ids(_a )
__magic_name__ : List[str] = torch.manual_seed(0 )
__magic_name__ : Optional[Any] = pipe(_a , generator=_a , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(_a , _a ):
__magic_name__ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 124 | 1 |
from __future__ import annotations
import requests
_lowerCAmelCase = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def _snake_case ( __snake_case , __snake_case = 1 , __snake_case = "new" , __snake_case = None ):
_UpperCamelCase = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
_UpperCamelCase = f"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__snake_case )
_UpperCamelCase = requests.get(
f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , )
if response.status_code == 429:
raise requests.HTTPError
_UpperCamelCase = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
_UpperCamelCase = {}
for id_ in range(__snake_case ):
_UpperCamelCase = {
item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 71 | import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DPMSolverSDEScheduler,)
UpperCAmelCase = 10
def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
_UpperCamelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 71 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
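# Worked example of the duplicate trimming in `evaluation_loop` above: with 2
# processes and a 5-example eval set, each process pads its shard to 3 examples,
# so gathering the last batch yields 6 entries, one of them a wrap-around
# duplicate. In plain Python:
#
#   gathered = [0, 1, 2, 3, 4, 0]       # last entry duplicates example 0
#   samples_seen = 0                    # examples kept from earlier batches
#   gathered[: 5 - samples_seen]        # -> [0, 1, 2, 3, 4]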
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
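# Each epoch writes a small JSON next to the accelerate checkpoint, e.g.
# (values purely illustrative):
#   {"accuracy": 0.84, "lr": 1.9e-05, "optimizer_lr": 1.9e-05, "epoch": 0, "overall_step": 230}
# A resumed run re-reads this file and asserts that the restored scheduler and
# optimizer reproduce exactly these values.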
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main() | 189 |
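# Hypothetical invocation sketch for the script above (the config file, script
# and checkpoint names are placeholders, not part of the original):
#
#   accelerate launch --config_file ds_config.yaml train_mrpc.py \
#       --num_epochs 2 --output_dir ./ckpts
#   accelerate launch --config_file ds_config.yaml train_mrpc.py \
#       --resume_from_checkpoint ./ckpts/epoch_0 --partial_train_epoch 1
#
# The second command restores model/optimizer/scheduler state via
# `accelerator.load_state` and validates it against ./ckpts/state_0.json.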
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """
    Return the minimum number of moves needed so that every node of the tree
    holds exactly one coin, where one move transfers one coin between two
    adjacent nodes.

    >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
    2
    >>> distribute_coins(TreeNode(0, TreeNode(0, TreeNode(3))))
    3
    >>> distribute_coins(None)
    0
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be the same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
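# How the recursion above works, on TreeNode(3, TreeNode(0), TreeNode(0)):
# the `excess` of a subtree equals coins(subtree) - nodes(subtree) + 1, so
# 1 - excess(child) is the net number of coins that must cross the edge to
# that child. Each empty leaf reports excess 0, the root therefore sends one
# coin to each side, and the answer is abs(1) + abs(1) == 2 moves.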
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 189 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
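# Minimal usage sketch (shapes illustrative). Flax modules are constructed
# with hyper-parameters, then `init`/`apply` handle the actual parameters:
#
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   x = jnp.zeros((1, 8, 8, 32))    # NHWC layout, as nn.Conv expects here
#   t = jnp.zeros((1, 128))         # time embedding
#   params = block.init(jax.random.PRNGKey(0), x, t)
#   y = block.apply(params, x, t)   # y.shape == (1, 8, 8, 64)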
| 703 |
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def strabool(v):
    """Parse a truthy/falsy command line string into a bool."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
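# Example of one mapping produced by `convert_resnet` (prefixes illustrative):
#   checkpoint["input_blocks.1.0.in_layers.0.weight"]
#       -> new_checkpoint["down_blocks.0.resnets.0.norm1.weight"]
# i.e. the GroupNorm/Conv/time-projection weights keep their values and only
# their names are translated into diffusers' ResnetBlock2D naming scheme.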
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
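# The source checkpoints store attention as one fused 1x1-conv tensor. A quick
# shape check of the splitting done above (channel count illustrative):
#
#   qkv = torch.zeros(3 * 64, 64, 1, 1)
#   q, k, v = qkv.chunk(3, dim=0)           # each (64, 64, 1, 1)
#   q.squeeze(-1).squeeze(-1).shape         # torch.Size([64, 64]), fits a Linear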
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
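At heart, the converter above is a mechanical rename between the original consistency-models layout (input_blocks / middle_block / output_blocks) and the diffusers layout (down_blocks / mid_block / up_blocks). A minimal sketch of that idea, with a hypothetical rename_prefix helper that is not part of the script itself:
def rename_prefix(checkpoint: dict, old_prefix: str, new_prefix: str) -> dict:
    """Copy every tensor stored under old_prefix to the same suffix under new_prefix."""
    return {
        new_prefix + key[len(old_prefix):]: value
        for key, value in checkpoint.items()
        if key.startswith(old_prefix)
    }

# e.g. the first resnet of the first down block lives at input_blocks.1.0
# (input_blocks.0.0 is the input conv, so current_layer starts at 1):
# rename_prefix(ckpt, "input_blocks.1.0.", "down_blocks.0.resnets.0.")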
| 648 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    collect_powers = set()
    max_limit = n + 1  # maximum limit: bases and exponents both run up to n inclusive
    for a in range(2 , max_limit):
        for b in range(2 , max_limit):
            current_power = a**b  # calculates the current power
            collect_powers.add(current_power)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
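# A quick sanity check straight from the Project Euler 29 statement:
# for n = 5 the powers 2**2 .. 5**5 collapse to exactly 15 distinct terms.
assert solution(5) == 15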
| 677 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
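The module above only builds the import structure at import time; `_LazyModule` resolves the heavy Torch/TF/Flax imports on first attribute access. A simplified sketch of the mechanism (an illustration, not the actual `transformers` implementation):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # reverse map: public name -> submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ fires only once per name
        return value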
| 677 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE_ = jnp.floataa
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Optional[int] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_snake_case : Optional[int] = []
for i in range(len(self.block_out_channels ) - 1 ):
_snake_case : Optional[int] = self.block_out_channels[i]
_snake_case : Any = self.block_out_channels[i + 1]
_snake_case : Optional[int] = nn.Conv(
SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = nn.Conv(
SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(SCREAMING_SNAKE_CASE__ )
_snake_case : int = blocks
_snake_case : List[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : List[Any] = self.conv_in(SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = nn.silu(SCREAMING_SNAKE_CASE__ )
for block in self.blocks:
_snake_case : Tuple = block(SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = nn.silu(SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = self.conv_out(SCREAMING_SNAKE_CASE__ )
return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 3_2
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 1_2_8_0
SCREAMING_SNAKE_CASE_ = 0.0
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = jnp.floataa
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = "rgb"
SCREAMING_SNAKE_CASE_ = (1_6, 3_2, 9_6, 2_5_6)
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size)
_snake_case : str = jnp.zeros(SCREAMING_SNAKE_CASE__ , dtype=jnp.floataa )
_snake_case : List[str] = jnp.ones((1,) , dtype=jnp.intaa )
_snake_case : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_snake_case : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
_snake_case : int = jnp.zeros(SCREAMING_SNAKE_CASE__ , dtype=jnp.floataa )
_snake_case , _snake_case : Optional[int] = jax.random.split(SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )["params"]
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : List[str] = self.block_out_channels
_snake_case : Optional[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_snake_case : Any = self.num_attention_heads or self.attention_head_dim
# input
_snake_case : str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_snake_case : Optional[Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_snake_case : List[str] = FlaxTimestepEmbedding(SCREAMING_SNAKE_CASE__ , dtype=self.dtype )
_snake_case : Dict = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
_snake_case : Dict = self.only_cross_attention
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case : Optional[int] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case : int = (num_attention_heads,) * len(self.down_block_types )
# down
_snake_case : List[str] = []
_snake_case : List[str] = []
_snake_case : List[str] = block_out_channels[0]
_snake_case : Optional[int] = nn.Conv(
SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(SCREAMING_SNAKE_CASE__ )
for i, down_block_type in enumerate(self.down_block_types ):
_snake_case : Optional[int] = output_channel
_snake_case : Dict = block_out_channels[i]
_snake_case : Dict = i == len(SCREAMING_SNAKE_CASE__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_snake_case : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
_snake_case : List[str] = FlaxDownBlockaD(
in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(SCREAMING_SNAKE_CASE__ )
for _ in range(self.layers_per_block ):
_snake_case : str = nn.Conv(
SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(SCREAMING_SNAKE_CASE__ )
if not is_final_block:
_snake_case : int = nn.Conv(
SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(SCREAMING_SNAKE_CASE__ )
_snake_case : int = down_blocks
_snake_case : Optional[Any] = controlnet_down_blocks
# mid
_snake_case : Union[str, Any] = block_out_channels[-1]
_snake_case : Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=SCREAMING_SNAKE_CASE__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
_snake_case : Tuple = nn.Conv(
SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1.0 , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = False , ):
"""simple docstring"""
_snake_case : Tuple = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_snake_case : Union[str, Any] = jnp.flip(SCREAMING_SNAKE_CASE__ , axis=1 )
# 1. time
if not isinstance(SCREAMING_SNAKE_CASE__ , jnp.ndarray ):
_snake_case : str = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(SCREAMING_SNAKE_CASE__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_snake_case : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
_snake_case : Optional[Any] = jnp.expand_dims(SCREAMING_SNAKE_CASE__ , 0 )
_snake_case : Optional[Any] = self.time_proj(SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[Any] = self.time_embedding(SCREAMING_SNAKE_CASE__ )
# 2. pre-process
_snake_case : Optional[int] = jnp.transpose(SCREAMING_SNAKE_CASE__ , (0, 2, 3, 1) )
_snake_case : Any = self.conv_in(SCREAMING_SNAKE_CASE__ )
_snake_case : Any = jnp.transpose(SCREAMING_SNAKE_CASE__ , (0, 2, 3, 1) )
_snake_case : Any = self.controlnet_cond_embedding(SCREAMING_SNAKE_CASE__ )
sample += controlnet_cond
# 3. down
_snake_case : Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case , _snake_case : Tuple = down_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=not train )
else:
_snake_case , _snake_case : Dict = down_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_snake_case : List[Any] = self.mid_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , deterministic=not train )
        # 5. controlnet blocks
_snake_case : Tuple = ()
for down_block_res_sample, controlnet_block in zip(SCREAMING_SNAKE_CASE__ , self.controlnet_down_blocks ):
_snake_case : Optional[Any] = controlnet_block(SCREAMING_SNAKE_CASE__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
_snake_case : Tuple = controlnet_down_block_res_samples
_snake_case : str = self.controlnet_mid_block(SCREAMING_SNAKE_CASE__ )
# 6. scaling
_snake_case : Optional[int] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=SCREAMING_SNAKE_CASE__ , mid_block_res_sample=SCREAMING_SNAKE_CASE__ )
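Note the pair of `jnp.transpose` calls in `__call__` above: samples arrive channels-first (NCHW) for parity with the PyTorch API, while Flax convolutions operate channels-last (NHWC). A quick illustration:
import jax.numpy as jnp

sample = jnp.zeros((1, 4, 64, 64))                 # NCHW, as passed by the caller
sample_nhwc = jnp.transpose(sample, (0, 2, 3, 1))  # NHWC, as nn.Conv expects
assert sample_nhwc.shape == (1, 64, 64, 4)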
| 519 |
def permute( nums ) -> list[list[int]]:
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permute2( nums ) -> list[list[int]]:
    def backtrack(start ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1 )
                nums[i], nums[start] = nums[start], nums[i] # backtrack
    output = []
    backtrack(0 )
    return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permute2([1, 2, 3])
print(res)
doctest.testmod()
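Both implementations enumerate all n! orderings; a quick check on a three-element list (3! = 6 permutations):
assert len(permute([1, 2, 3])) == 6
assert sorted(permute2([1, 2, 3])) == sorted(permute([1, 2, 3]))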
| 519 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : List[str] = {}
class _a (PretrainedConfig ):
    '''simple docstring'''
    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__( self ,vocab_size=32_000 ,hidden_size=4_096 ,intermediate_size=11_008 ,num_hidden_layers=32 ,num_attention_heads=32 ,num_key_value_heads=None ,hidden_act="silu" ,max_position_embeddings=2_048 ,initializer_range=0.02 ,rms_norm_eps=1E-6 ,use_cache=True ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,pretraining_tp=1 ,tie_word_embeddings=False ,rope_scaling=None ,**kwargs ,) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,tie_word_embeddings=tie_word_embeddings ,**kwargs ,)
    def _rope_scaling_validation( self ) -> None:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling ,dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("""type""" ,None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" ,None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor ,float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
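Given the validation above, `rope_scaling` must be a two-field dict whose `type` is linear or dynamic and whose `factor` is a float greater than 1. For example (using the upstream class name LlamaConfig for readability):
LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  # raises ValueError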
| 116 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def test_tpu( self ):
        distributed_args = f'''
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 695 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 714 |
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class snake_case_ ( TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SCREAMING_SNAKE_CASE_ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id( self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 2_0_0_0 )
    def test_vocab_size( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
    def test_full_tokenizer( self ):
        tokenizer = GPTSwaTokenizer(SCREAMING_SNAKE_CASE_ )
a_ : str = tokenizer.tokenize("This is a test" )
self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
a_ : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
a_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
a_ : List[str] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(a_ )
# fmt: off
self.assertListEqual(
a_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
    def test_fast_encode_decode( self ):
        tokenizer = GPTSwaTokenizer(SCREAMING_SNAKE_CASE_ )
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
            [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
    def test_tokenizer_integration( self ):
        texts = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
a_ : Union[str, Any] = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=a_ , ) | 370 | 0 |
def validate_initial_digits( credit_card_number: str ) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def luhn_validation( credit_card_number: str ) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number( credit_card_number: str ) -> bool:
    '''simple docstring'''
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters." )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f"{error_message} of its length." )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f"{error_message} of its first two digits." )
        return False
    if not luhn_validation(credit_card_number ):
        print(f"{error_message} it fails the Luhn check." )
        return False
    print(f"{credit_card_number} is a valid credit card number." )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 313 |
from ...processing_utils import ProcessorMixin
class __lowercase ( ProcessorMixin ):
    feature_extractor_class = '''SpeechT5FeatureExtractor'''
    tokenizer_class = '''SpeechT5Tokenizer'''
    def __init__( self , feature_extractor , tokenizer) -> None:
        super().__init__(feature_extractor , tokenizer)
    def __call__( self , *args , **kwargs):
        audio = kwargs.pop('audio' , None)
        text = kwargs.pop('text' , None)
        text_target = kwargs.pop('text_target' , None)
        audio_target = kwargs.pop('audio_target' , None)
        sampling_rate = kwargs.pop('sampling_rate' , None)
        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs)
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs)
            labels = targets['input_ids']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs
    def pad( self , *args , **kwargs):
        input_values = kwargs.pop('input_values' , None)
        input_ids = kwargs.pop('input_ids' , None)
        labels = kwargs.pop('labels' , None)
        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.')
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs)
                labels = targets['input_ids']
            else:
                # temporarily swap feature_size so the mel spectrogram target pads correctly
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs
    def batch_decode( self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)
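Typical usage, as a sketch (the processor and waveform objects below are assumed, and the exact call is illustrative of the routing logic above): text goes through the tokenizer, an audio target through the feature extractor, and the padded target features land in labels:
inputs = processor(
    text="Hello world",
    audio_target=waveform,      # 1-D array of target speech samples (assumed)
    sampling_rate=16000,
)
# inputs["input_ids"]              -> tokenized text
# inputs["labels"]                 -> target "input_values" features
# inputs["decoder_attention_mask"] -> attention mask carried over from the target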
| 313 | 1 |
"""simple docstring"""
def compute_ap( l ):  # noqa: E741
    """simple docstring"""
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 302 |
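A minimal sanity check: in the path graph 0-1-2 the middle vertex is the only articulation point, so this call should print just 1:
compute_ap({0: [1], 1: [0, 2], 2: [1]})  # prints: 1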
"""simple docstring"""
import sys
def matrix_chain_order( array ):
    """simple docstring"""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution( optimal_solution , i , j ):
    """simple docstring"""
    if i == j:
        print('A' + str(i ) , end=' ' )
    else:
        print('(' , end=' ' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(')' , end=' ' )
def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array )
    print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main() | 302 | 1 |
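For the dimension sequence [30, 35, 15, 5, 10, 20, 25] — the classic CLRS matrix-chain example — the expected output is:
# No. of Operation required: 15125
# ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )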
from collections import defaultdict
def dfs( start: int ) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree() -> None:
    '''simple docstring'''
    dfs(1 )
if __name__ == "__main__":
    n , m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
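For this 10-node sample (the classic Even Tree instance), cutting the edges into nodes 3 and 6 leaves components of sizes 2, 4 and 4 — all even — so the program prints 2. Note that dfs(1) also appends the root itself (subtree size 10), which is why the answer is len(cuts) - 1:
# cuts collected in DFS order: [3, 6, 1]; printed answer: 2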
| 36 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class A_ ( PretrainedConfig ):
    model_type = """swin2sr"""
    attribute_map = {
        """hidden_size""": """embed_dim""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , image_size=6_4 , patch_size=1 , num_channels=3 , embed_dim=1_8_0 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1e-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 236 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : List[Any] = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__snake_case : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 | 1 |
"""simple docstring"""
import os
from collections.abc import Iterator
def _UpperCamelCase ( UpperCamelCase = "." ) -> Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(UpperCamelCase ):
__UpperCAmelCase : Any = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(UpperCamelCase )[1] in (".py", ".ipynb"):
yield os.path.join(UpperCamelCase , UpperCamelCase ).lstrip("./" )
def _UpperCamelCase ( UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
return f"{i * ' '}*" if i else "\n##"
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(UpperCamelCase ) or old_parts[i] != new_part) and new_part:
print(f"{md_prefix(UpperCamelCase )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def _UpperCamelCase ( UpperCamelCase = "." ) -> None:
"""simple docstring"""
__UpperCAmelCase : str = ""
for filepath in sorted(good_file_paths(UpperCamelCase ) ):
__UpperCAmelCase , __UpperCAmelCase : List[str] = os.path.split(UpperCamelCase )
if filepath != old_path:
__UpperCAmelCase : Tuple = print_path(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = (filepath.count(os.sep ) + 1) if filepath else 0
__UpperCAmelCase : Any = f"{filepath}/{filename}".replace(" " , "%20" )
__UpperCAmelCase : int = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(f"{md_prefix(UpperCamelCase )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md(""".""")
| 77 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB ,eos_token='<unk>' ,bos_token='<unk>' ,pad_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self ,tokenizer ):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id( self ):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,'<unk>' )
        self.assertEqual(vocab_keys[1] ,'<s>' )
        self.assertEqual(vocab_keys[-1] ,'j' )
        self.assertEqual(len(vocab_keys ) ,2000 )
    def test_vocab_size( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,2000 )
    def test_full_tokenizer( self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens ,['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[465, 287, 265, 631, 842] )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        # fmt: off
        self.assertListEqual(
            tokens ,['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] ,)
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids ,[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens ,['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
        # fmt: on
    def test_fast_encode_decode( self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts ,expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) ,expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts ,expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) ,text )
@slow
    def test_tokenizer_integration( self ):
        texts = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
_a : Tuple = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_a ,model_name='AI-Sweden/gpt-sw3-126m' ,sequences=texts ,)
| 358 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = ['flax', 'transformers']
def __init__( self : Dict , *lowerCamelCase : List[str] , **lowerCamelCase : Optional[int] ) -> Dict:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : List[str] ) -> Dict:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __lowercase ( cls : Tuple , *lowerCamelCase : Dict , **lowerCamelCase : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ["""flax""", """transformers"""] )
class __snake_case ( metaclass=_SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = ['flax', 'transformers']
def __init__( self : Any , *lowerCamelCase : int , **lowerCamelCase : int ) -> Any:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : Tuple ) -> Tuple:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __lowercase ( cls : Dict , *lowerCamelCase : List[Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
requires_backends(cls , ["""flax""", """transformers"""] )
class __snake_case ( metaclass=_SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = ['flax', 'transformers']
def __init__( self : Dict , *lowerCamelCase : Optional[int] , **lowerCamelCase : Tuple ) -> int:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __lowercase ( cls : Optional[int] , *lowerCamelCase : Optional[int] , **lowerCamelCase : Dict ) -> int:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __lowercase ( cls : List[str] , *lowerCamelCase : List[str] , **lowerCamelCase : int ) -> Optional[int]:
requires_backends(cls , ["""flax""", """transformers"""] )
class __snake_case ( metaclass=_SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = ['flax', 'transformers']
def __init__( self : Dict , *lowerCamelCase : List[Any] , **lowerCamelCase : Optional[Any] ) -> Any:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __lowercase ( cls : str , *lowerCamelCase : Optional[int] , **lowerCamelCase : Dict ) -> Optional[Any]:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __lowercase ( cls : str , *lowerCamelCase : Optional[Any] , **lowerCamelCase : Dict ) -> List[str]:
requires_backends(cls , ["""flax""", """transformers"""] )
| 398 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 398 | 1 |
"""simple docstring"""
import argparse
import hashlib  # hashlib is used only to cross-check our implementation in the test below
import struct
class SHAaHash:
    """Pure-Python SHA-1: pads the message, then digests it in 512-bit blocks."""

    def __init__(self, data):
        self.data = data
        # Initial hash values (h0..h4) from the SHA-1 specification.
        self.h = [0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation.
        return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF

    def padding(self):
        # Append 0x80, zero-pad to 56 mod 64, then the 64-bit big-endian bit length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand the 16 32-bit words of a block into the 80-word message schedule.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62_C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFF_FFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFF_FFFF,
                self.h[1] + b & 0xFFFF_FFFF,
                self.h[2] + c & 0xFFFF_FFFF,
                self.h[3] + d & 0xFFFF_FFFF,
                self.h[4] + e & 0xFFFF_FFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaHash(hash_input).final_hash())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
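# Quick sanity check against the classic SHA-1 test vector (uses the class above):
#   SHAaHash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"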
| 698 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
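# mBART-50 places its language codes above the base SentencePiece vocabulary, so
# en_XX/ro_RO resolve to the ids pinned here; the tests below rely on these values.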
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE,  # the verbatim reference encoding above
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 698 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
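# Illustrative example (hypothetical key, following the rules above):
#   rename_key("patch_embed.0.weight", 5) -> "efficientformer.patch_embed.convolution1.weight"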
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 717 |
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    # Count how many distinct rolls of `dice_number` dice produce each total.
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
def solution() -> float:
    # Probability that Peter (nine 4-sided dice) rolls strictly more than
    # Colin (six 6-sided dice).
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9

    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)

    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
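# Known result for this problem (Project Euler 205): solution() = 0.5731441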
| 684 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        # flag values below are reconstructed assumptions: no padding, truncate, strip specials
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
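    # Note: BlenderbotSmall lowercases input and whitespace-splits punctuation, which
    # is why "I am a small frog." round-trips to "i am a small frog ." above.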
| 20 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )

        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
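# The pipeline returns 20 rendered frames of 64x64 RGB per input image, i.e. a
# (20, 64, 64, 3) array that can, for example, be stitched into a turntable GIF.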
| 627 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
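# e.g. normalize_box([10, 20, 30, 40], 200, 100) -> [50, 200, 150, 400]: pixel
# coordinates rescaled onto the 0-1000 grid that LayoutLM-style models expect.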
def apply_tesseract(image, lang, tesseract_config):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class lowerCAmelCase_(BaseImageProcessor):
    # Original class name is not recoverable from this excerpt.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # bare `resize` below refers to the module-level image transform, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 573 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    """Tests for greedy_knapsack.calc_profit."""
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)
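    # Everything fits here (total weight 42 <= max_weight 100), so the greedy
    # strategy takes all items and returns the full profit sum: 10+...+60 = 210.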
    def test_negative_max_weight(self):
        # a negative max_weight must be rejected
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # a negative entry in weight must be rejected
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # a negative entry in profit must be rejected
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # a null max_weight must be rejected
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # profit and weight lists must have the same length
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 573 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,"""vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
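# Minimal usage sketch (model choice is illustrative, not prescribed by this file):
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]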
| 336 |
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
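# Illustrative rename using the mapping above:
#   "decoder.blocks.0.mlp.0.weight" -> "decoder.layers.0.fc1.weight"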
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
lowercase : Optional[Any] = list(s_dict.keys() )
for key in keys:
lowercase : Any = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowercase : Dict = new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f"{key} -> {new_key}" )
lowercase : Dict = s_dict.pop(SCREAMING_SNAKE_CASE__ )
return s_dict
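# For example, the substring table above rewrites the original checkpoint key
# "decoder.blocks.0.mlp.0.weight" first via "blocks" -> "layers" and then via
# "mlp.0" -> "fc1", yielding "decoder.layers.0.fc1.weight".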
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
lowercase , lowercase : Optional[Any] = emb.weight.shape
lowercase : Dict = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
lowercase : str = emb.weight.data
return lin_layer
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> bytes:
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = os.path.basename(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = url.split("""/""" )[-2]
lowercase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not os.path.isfile(SCREAMING_SNAKE_CASE__ ):
raise RuntimeError(f"{download_target} exists and is not a regular file" )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
lowercase : Any = open(SCREAMING_SNAKE_CASE__ , """rb""" ).read()
        if hashlib.sha256(SCREAMING_SNAKE_CASE__ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(SCREAMING_SNAKE_CASE__ ) as source, open(SCREAMING_SNAKE_CASE__ , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=SCREAMING_SNAKE_CASE__ , unit_divisor=1_024 ) as loop:
while True:
lowercase : Any = source.read(8_192 )
if not buffer:
break
output.write(SCREAMING_SNAKE_CASE__ )
loop.update(len(SCREAMING_SNAKE_CASE__ ) )
lowercase : Dict = open(SCREAMING_SNAKE_CASE__ , """rb""" ).read()
    if hashlib.sha256(SCREAMING_SNAKE_CASE__ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
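# A minimal sketch of the integrity check performed above (the local filename is
# hypothetical; the expected digest is the hash segment embedded in the "tiny"
# download URL from _MODELS):
#
#   import hashlib
#   expected = "65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9"
#   data = open("tiny.pt", "rb").read()
#   assert hashlib.sha256(data).hexdigest() == expected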
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
if ".pt" not in checkpoint_path:
lowercase : Any = _download(_MODELS[checkpoint_path] )
else:
lowercase : Tuple = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
lowercase : Tuple = original_checkpoint["""dims"""]
lowercase : str = original_checkpoint["""model_state_dict"""]
lowercase : Tuple = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
rename_keys(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = True
lowercase : str = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
lowercase : Tuple = WhisperConfig(
    vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=SCREAMING_SNAKE_CASE__ , decoder_ffn_dim=SCREAMING_SNAKE_CASE__ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
lowercase : Any = WhisperForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Union[str, Any] = model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0 and not set(SCREAMING_SNAKE_CASE__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f" but all the following weights are missing {missing}" )
if tie_embeds:
lowercase : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowercase : str = proj_out_weights
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowercase : Optional[int] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
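# Example invocation (the script filename is hypothetical; passing "tiny" makes
# the script resolve the checkpoint through the _MODELS table above):
#
#   python convert_openai_whisper_to_hf.py \
#       --checkpoint_path tiny \
#       --pytorch_dump_folder_path ./whisper-tiny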
| 336 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : str = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
lowercase : Optional[int] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : List[Any] = {}
with open(_lowerCamelCase , "r") as file:
for line_number, line in enumerate(_lowerCamelCase):
__UpperCamelCase : Tuple = line.strip()
if line:
__UpperCamelCase : List[Any] = line.split()
__UpperCamelCase : Optional[int] = line_number
__UpperCamelCase : str = words[0]
__UpperCamelCase : int = value
return result
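# The reader above builds an id-to-label mapping. For a hypothetical label file
#
#   down
#   go
#   left
#
# it returns {0: "down", 1: "go", 2: "left"}: each zero-based line number keyed
# to the first whitespace-separated token on that line (blank lines are skipped
# but still consume a line number).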
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any) -> Optional[int]:
'''simple docstring'''
for attribute in key.split("."):
__UpperCamelCase : int = getattr(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase):
__UpperCamelCase : str = PARAM_MAPPING[full_name.split(".")[-1]]
__UpperCamelCase : Optional[Any] = "param"
if weight_type is not None and weight_type != "param":
__UpperCamelCase : int = getattr(_lowerCamelCase , _lowerCamelCase).shape
elif weight_type is not None and weight_type == "param":
__UpperCamelCase : Any = hf_pointer
for attribute in hf_param_name.split("."):
__UpperCamelCase : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : Optional[int] = shape_pointer.shape
# let's reduce dimension
__UpperCamelCase : Dict = value[0]
else:
__UpperCamelCase : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}')
if weight_type == "weight":
__UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
__UpperCamelCase : Union[str, Any] = value
elif weight_type == "weight_v":
__UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
__UpperCamelCase : List[str] = value
elif weight_type == "param":
for attribute in hf_param_name.split("."):
__UpperCamelCase : List[str] = getattr(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : Optional[Any] = value
else:
__UpperCamelCase : int = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : str) -> Any:
'''simple docstring'''
__UpperCamelCase : Tuple = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase):
__UpperCamelCase : str = PARAM_MAPPING[full_name.split(".")[-1]]
__UpperCamelCase : List[str] = "param"
if weight_type is not None and weight_type != "param":
__UpperCamelCase : Any = ".".join([key, weight_type])
elif weight_type is not None and weight_type == "param":
__UpperCamelCase : str = ".".join([key, hf_param_name])
else:
__UpperCamelCase : str = key
__UpperCamelCase : int = value if "lm_head" in full_key else value[0]
lowercase : int = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = False
for key, mapped_key in MAPPING.items():
__UpperCamelCase : int = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
__UpperCamelCase : Dict = True
if "*" in mapped_key:
__UpperCamelCase : Tuple = name.split(_lowerCamelCase)[0].split(".")[-2]
__UpperCamelCase : List[str] = mapped_key.replace("*" , _lowerCamelCase)
if "weight_g" in name:
__UpperCamelCase : Any = "weight_g"
elif "weight_v" in name:
__UpperCamelCase : Union[str, Any] = "weight_v"
elif "bias" in name:
__UpperCamelCase : Any = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCamelCase : int = "weight"
else:
__UpperCamelCase : str = None
if hf_dict is not None:
rename_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return is_used
return is_used
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : Any) -> str:
'''simple docstring'''
__UpperCamelCase : str = []
__UpperCamelCase : Union[str, Any] = fairseq_model.state_dict()
__UpperCamelCase : Optional[int] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
__UpperCamelCase : int = True
else:
__UpperCamelCase : List[str] = load_wavaveca_layer(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
if not is_used:
unused_weights.append(_lowerCamelCase)
logger.warning(F'Unused weights: {unused_weights}')
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int]) -> str:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = full_name.split("conv_layers.")[-1]
__UpperCamelCase : List[str] = name.split(".")
__UpperCamelCase : Dict = int(items[0])
__UpperCamelCase : Tuple = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.')
__UpperCamelCase : Optional[int] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.')
__UpperCamelCase : str = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.')
__UpperCamelCase : List[str] = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.')
__UpperCamelCase : str = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(_lowerCamelCase)
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str=None , _lowerCamelCase : str=None , _lowerCamelCase : Tuple=True , _lowerCamelCase : Optional[int]=False) -> Tuple:
'''simple docstring'''
if config_path is not None:
__UpperCamelCase : Union[str, Any] = WavaVecaConfig.from_pretrained(_lowerCamelCase)
else:
__UpperCamelCase : Optional[Any] = WavaVecaConfig()
if is_seq_class:
__UpperCamelCase : List[str] = read_txt_into_dict(_lowerCamelCase)
__UpperCamelCase : int = idalabel
__UpperCamelCase : Optional[Any] = WavaVecaForSequenceClassification(_lowerCamelCase)
__UpperCamelCase : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
feature_extractor.save_pretrained(_lowerCamelCase)
elif is_finetuned:
if dict_path:
__UpperCamelCase : Optional[Any] = Dictionary.load(_lowerCamelCase)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCamelCase : List[Any] = target_dict.pad_index
__UpperCamelCase : Union[str, Any] = target_dict.bos_index
__UpperCamelCase : Optional[int] = target_dict.eos_index
__UpperCamelCase : Tuple = len(target_dict.symbols)
__UpperCamelCase : Optional[int] = os.path.join(_lowerCamelCase , "vocab.json")
if not os.path.isdir(_lowerCamelCase):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase))
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase)
__UpperCamelCase : Any = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCamelCase : Optional[Any] = 0
__UpperCamelCase : Dict = 1
with open(_lowerCamelCase , "w" , encoding="utf-8") as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : Union[str, Any] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
__UpperCamelCase : List[Any] = True if config.feat_extract_norm == "layer" else False
__UpperCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
__UpperCamelCase : Tuple = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase)
processor.save_pretrained(_lowerCamelCase)
__UpperCamelCase : int = WavaVecaForCTC(_lowerCamelCase)
else:
__UpperCamelCase : Optional[int] = WavaVecaForPreTraining(_lowerCamelCase)
if is_finetuned or is_seq_class:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
else:
__UpperCamelCase : Any = argparse.Namespace(task="audio_pretraining")
__UpperCamelCase : str = fairseq.tasks.setup_task(_lowerCamelCase)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase)
__UpperCamelCase : Any = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned)
hf_wavavec.save_pretrained(_lowerCamelCase)
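# Example invocation (the script name and paths are hypothetical; --dict_path
# points at the fairseq fine-tuning dictionary when converting a CTC model):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path wav2vec_small_960h.pt \
#       --dict_path dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h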
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
lowercase : Any = parser.parse_args()
lowercase : int = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 94 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = ComputeEnvironment.AMAZON_SAGEMAKER
_A = True
_A = 'ml.p3.2xlarge'
_A = 'accelerate_sagemaker_execution_role'
_A = 'hf-sm'
_A = 'us-east-1'
_A = 1
_A = 'accelerate-sagemaker-1'
_A = '1.6'
_A = '4.4'
_A = 'train.py'
_A = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_A = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :Tuple ) -> str:
# If no defaults are changed, `to_kwargs` returns an empty dict.
__UpperCamelCase : str = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"] , a )
assert isinstance(converted_args["do_train"] , a )
assert isinstance(converted_args["epochs"] , a )
assert isinstance(converted_args["learning_rate"] , a )
assert isinstance(converted_args["max_steps"] , a )
with pytest.raises(a ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 94 | 1 |
from __future__ import annotations
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : list[list[int]] = []
__lowerCamelCase : list[int] = []
__lowerCamelCase : int = 0
__lowerCamelCase : Optional[int] = sum(SCREAMING_SNAKE_CASE__ )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return result
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum:
return
if sum(SCREAMING_SNAKE_CASE__ ) == max_sum:
result.append(SCREAMING_SNAKE_CASE__ )
return
for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
create_state_space_tree(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , )
lowercase_ = [3, 3_4, 4, 1_2, 5, 2]
lowercase_ = 9
lowercase_ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
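# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 this prints the two solutions
# [3, 4, 2] and [4, 5]. The guard at the top of create_state_space_tree prunes
# any branch whose partial sum already exceeds 9, or whose partial sum plus all
# remaining numbers can no longer reach 9.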
| 669 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__snake_case = 1
@register_to_config
def __init__( self: str , a: str=2000 , a: List[str]=0.1 , a: Any=20 , a: Dict=1e-3 ):
__lowerCamelCase : Dict = None
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[int] = None
def _snake_case ( self: int , a: str , a: Union[str, torch.device] = None ):
__lowerCamelCase : int = torch.linspace(1 , self.config.sampling_eps , a , device=a )
def _snake_case ( self: List[Any] , a: Union[str, Any] , a: Tuple , a: Optional[Any] , a: Dict=None ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowerCamelCase : Tuple = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowerCamelCase : Optional[int] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowerCamelCase : Optional[Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowerCamelCase : List[str] = std.unsqueeze(-1 )
__lowerCamelCase : Any = -score / std
# compute
__lowerCamelCase : List[Any] = -1.0 / len(self.timesteps )
__lowerCamelCase : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowerCamelCase : Dict = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowerCamelCase : int = beta_t.unsqueeze(-1 )
__lowerCamelCase : Any = -0.5 * beta_t * x
__lowerCamelCase : List[Any] = torch.sqrt(a )
__lowerCamelCase : Tuple = drift - diffusion**2 * score
__lowerCamelCase : str = x + drift * dt
# add noise
__lowerCamelCase : Any = randn_tensor(x.shape , layout=x.layout , generator=a , device=x.device , dtype=x.dtype )
__lowerCamelCase : Any = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self: Optional[int] ):
return self.config.num_train_timesteps
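# The step method above performs a single Euler-Maruyama update of the
# reverse-time VP-SDE:
#
#   x_mean = x + ( -0.5 * beta(t) * x - beta(t) * score ) * dt
#   x      = x_mean + sqrt(beta(t)) * sqrt(-dt) * z,    z ~ N(0, I)
#
# where dt = -1 / num_timesteps, beta(t) = beta_min + t * (beta_max - beta_min),
# and the raw model output is first converted to a score as -output / std, with
# std = sqrt(1 - exp(2 * log_mean_coeff)).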
| 669 | 1 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def _snake_case ( snake_case__ : Optional[int] ):
    exp_x = torch.exp(snake_case__ )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(snake_case__ * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
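# A quick, hedged sanity check of the entropy above: for uniform logits the
# value reduces to log(num_classes).
#
#   logits = torch.zeros(1, 4)
#   _snake_case(logits)  # -> tensor([1.3863]), i.e. log(4)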
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Dict ) -> List[Any]:
super().__init__()
A = config.output_attentions
A = config.output_hidden_states
A = nn.ModuleList([BertLayer(__lowerCAmelCase ) for _ in range(config.num_hidden_layers )] )
A = nn.ModuleList([BertHighway(__lowerCAmelCase ) for _ in range(config.num_hidden_layers )] )
A = [-1 for _ in range(config.num_hidden_layers )]
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> Optional[Any]:
if (type(__lowerCAmelCase ) is float) or (type(__lowerCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
A = x
else:
A = x
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[str] ) -> Union[str, Any]:
A = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : List[Any]=None ,A_ : List[Any]=None ,A_ : Dict=None ,A_ : List[str]=None ,) -> Tuple:
A = ()
A = ()
A = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
A = all_hidden_states + (hidden_states,)
A = layer_module(
__lowerCAmelCase ,__lowerCAmelCase ,head_mask[i] ,__lowerCAmelCase ,__lowerCAmelCase )
A = layer_outputs[0]
if self.output_attentions:
A = all_attentions + (layer_outputs[1],)
A = (hidden_states,)
if self.output_hidden_states:
A = current_outputs + (all_hidden_states,)
if self.output_attentions:
A = current_outputs + (all_attentions,)
A = self.highway[i](__lowerCAmelCase )
# logits, pooled_output
if not self.training:
A = highway_exit[0]
A = entropy(__lowerCAmelCase )
A = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
A = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
A = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__lowerCAmelCase ,i + 1 )
else:
A = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
A = all_hidden_states + (hidden_states,)
A = (hidden_states,)
if self.output_hidden_states:
A = outputs + (all_hidden_states,)
if self.output_attentions:
A = outputs + (all_attentions,)
A = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , _lowercase , )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Any ) -> Tuple:
super().__init__(__lowerCAmelCase )
A = config
A = BertEmbeddings(__lowerCAmelCase )
A = DeeBertEncoder(__lowerCAmelCase )
A = BertPooler(__lowerCAmelCase )
self.init_weights()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
self.encoder.init_highway_pooler(self.pooler )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return self.embeddings.word_embeddings
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Tuple ) -> Tuple:
A = value
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ) -> List[str]:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__lowerCAmelCase )
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int]=None ,A_ : str=None ,A_ : Dict=None ,A_ : str=None ,A_ : Optional[int]=None ,A_ : int=None ,A_ : Optional[Any]=None ,A_ : Dict=None ,) -> Dict:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
A = input_ids.size()
elif inputs_embeds is not None:
A = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
A = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
A = torch.ones(__lowerCAmelCase ,device=__lowerCAmelCase )
if encoder_attention_mask is None:
A = torch.ones(__lowerCAmelCase ,device=__lowerCAmelCase )
if token_type_ids is None:
A = torch.zeros(__lowerCAmelCase ,dtype=torch.long ,device=__lowerCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
A = self.get_extended_attention_mask(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
A = encoder_attention_mask[:, None, None, :]
A = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
A = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
A = self.get_head_mask(__lowerCAmelCase ,self.config.num_hidden_layers )
A = self.embeddings(
input_ids=__lowerCAmelCase ,position_ids=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,inputs_embeds=__lowerCAmelCase )
A = self.encoder(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,head_mask=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,encoder_attention_mask=__lowerCAmelCase ,)
A = encoder_outputs[0]
A = self.pooler(__lowerCAmelCase )
A = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Dict ,A_ : List[Any] ,A_ : Dict ) -> List[Any]:
A = message
A = exit_layer # start from 1!
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[int] ) -> str:
super().__init__()
A = BertPooler(__lowerCAmelCase )
A = nn.Dropout(config.hidden_dropout_prob )
A = nn.Linear(config.hidden_size ,config.num_labels )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = encoder_outputs[0]
A = self.pooler(__lowerCAmelCase )
# "return" pooler_output
# BertModel
A = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
A = bmodel_output[1]
A = self.dropout(__lowerCAmelCase )
A = self.classifier(__lowerCAmelCase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , _lowercase , )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : Any ) -> List[Any]:
super().__init__(__lowerCAmelCase )
A = config.num_labels
A = config.num_hidden_layers
A = DeeBertModel(__lowerCAmelCase )
A = nn.Dropout(config.hidden_dropout_prob )
A = nn.Linear(config.hidden_size ,self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[int]=None ,A_ : Any=None ,A_ : str=None ,A_ : str=None ,A_ : Any=None ,A_ : int=None ,A_ : Any=None ,A_ : List[Any]=-1 ,A_ : Union[str, Any]=False ,) -> int:
A = self.num_layers
try:
A = self.bert(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,position_ids=__lowerCAmelCase ,head_mask=__lowerCAmelCase ,inputs_embeds=__lowerCAmelCase ,)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
A = outputs[1]
A = self.dropout(__lowerCAmelCase )
A = self.classifier(__lowerCAmelCase )
A = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A = e.message
A = e.exit_layer
A = outputs[0]
if not self.training:
A = entropy(__lowerCAmelCase )
A = []
A = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A = MSELoss()
A = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
else:
A = CrossEntropyLoss()
A = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
# work with highway exits
A = []
for highway_exit in outputs[-1]:
A = highway_exit[0]
if not self.training:
highway_logits_all.append(__lowerCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A = MSELoss()
A = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
else:
A = CrossEntropyLoss()
A = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
highway_losses.append(__lowerCAmelCase )
if train_highway:
A = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A = (loss,) + outputs
if not self.training:
A = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
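# Early-exit recap: every intermediate layer feeds a "highway" classifier. At
# inference time, if the entropy of a highway's prediction falls below that
# layer's early_exit_entropy threshold, a HighwayException aborts the forward
# pass and the highway logits are returned without running the deeper layers.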
| 706 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
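    # Hedged usage sketch for the directed graph above (vertex labels are
    # arbitrary; add_pair and dfs are the names the class itself uses when
    # calling these methods internally):
    #
    #   g = lowerCAmelCase_()
    #   g.add_pair(0, 1)
    #   g.add_pair(1, 2)
    #   g.add_pair(0, 2)
    #   g.dfs(0, 2)  # -> [0, 1, 2], the vertices visited until the target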
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
# check if the u exists
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
        return end - begin
| 22 | 0 |
'''simple docstring'''
from __future__ import annotations
def snake_case ( a_ : Union[str, Any] , a_ : Optional[Any] ) -> list[int]:
"""simple docstring"""
UpperCamelCase_ : Dict = 0
UpperCamelCase_ : Any = len(__a ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
UpperCamelCase_ : Optional[int] = i + 1
else:
UpperCamelCase_ : Optional[Any] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 208 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase : List[str] = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class A__ ( A__ ):
"""simple docstring"""
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = ['input_ids', 'attention_mask']
_lowercase = RobertaTokenizer
def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ):
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) )
a__ : int = add_prefix_space
a__ : Tuple = pre_tok_class(**lowerCamelCase__ )
a__ : str = add_prefix_space
a__ : Tuple = "post_processor"
a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a__ : Tuple = tuple(state["sep"] )
if "cls" in state:
a__ : str = tuple(state["cls"] )
a__ : str = False
if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
a__ : str = add_prefix_space
a__ : Any = True
if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets:
a__ : int = trim_offsets
a__ : Dict = True
if changes_to_apply:
a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) )
a__ : str = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
def _UpperCamelCase( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ):
a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
a__ : List[str] = value
def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ):
a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ):
a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ):
a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
a__ : Tuple = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
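    # Hedged illustration of the two helpers above (token ids shown
    # symbolically): a single sequence is wrapped as
    #   <s> A </s>
    # and a pair as
    #   <s> A </s> </s> B </s>
    # while the token-type helper returns all zeros in both cases, since
    # RoBERTa does not use token type ids.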
| 37 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase ( __A ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __lowercase ( UpperCamelCase_ : ArgumentParser ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def __lowercase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError()
| 703 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Any ) -> None:
'''simple docstring'''
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 411 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 338 | """simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = input_paths_and_base_extractors[compression_format]
if input_path is None:
_SCREAMING_SNAKE_CASE : List[Any] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
assert base_extractor.is_extractable(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_SCREAMING_SNAKE_CASE : Optional[int] = file_path.read_text(encoding="""utf-8""" )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = output_path.read_text(encoding="""utf-8""" )
_SCREAMING_SNAKE_CASE : List[str] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have fewer false positives than zipfile.is_zipfile,
    # which we achieve by checking only the magic number.
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
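# Editor's sketch (standalone illustration, not from the original file): the
# robust check the test above relies on reads only the leading magic bytes,
# instead of scanning for an end-of-central-directory record like zipfile does.
def _looks_like_zip(path) -> bool:
    magic_numbers = (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")
    with open(path, "rb") as f:
        return f.read(4) in magic_numbers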
| 338 | 1 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 331 |
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
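# Editor's sketch (hypothetical shapes, separate from the class): _decode_audio
# pads the mask with the *non*-padding value so samples generated beyond the
# prompt are kept, then slices each example back to its true length.
def _padding_mask_demo() -> list:
    audio = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (batch, channels, seq_len)
    mask = np.array([[1, 1, 1, 1], [1, 1, 0, 0]])  # 1 = real sample, 0 = padding
    mask = np.pad(mask, ((0, 0), (0, 6 - mask.shape[-1])), "constant", constant_values=1)
    trimmed = [audio[i][mask[i][None, :].astype(bool)].reshape(1, -1) for i in range(2)]
    return [t.shape for t in trimmed]  # [(1, 6), (1, 4)]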
| 331 | 1 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Squared Euclidean norm, i.e. vector . vector."""
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the dual objective, so that `minimize` maximizes it."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
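# Editor's usage sketch (appended for illustration; the toy data and the
# expected label are assumptions, not part of the original file):
def _svc_demo() -> int:
    observations = [np.array([0.0, 1.0]), np.array([0.0, 2.0]), np.array([3.0, 1.0]), np.array([3.0, 2.0])]
    classes = np.array([1, 1, -1, -1])
    svc = SVC(kernel="linear")
    svc.fit(observations, classes)
    return svc.predict(np.array([0.0, 1.5]))  # expected: 1 (closer to the +1 cluster)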
| 54 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]
def __init__( self : List[str] , *__lowercase : Union[str, Any] , **__lowercase : Any ):
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*__lowercase , **__lowercase )
    def encode(self, image: "Image", label: str):
        # padding value assumed; the dump obscured the original argument
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8)) | 522 | 0 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor | 59 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
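# Editor's note (illustrative helper, not used by the pipeline): both helpers
# above round spatial sizes *down* to a multiple of 8 (images) or 32 (masks) so
# the UNet's down/upsampling stages divide the resolution evenly.
def _round_down_to_multiple(x: int, k: int) -> int:
    """e.g. _round_down_to_multiple(517, 8) == 512 == _round_down_to_multiple(517, 32)"""
    return x - x % k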
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = _preprocess_image(image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image) | 59 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
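# Editor's sketch (made-up token ids): a pure-Python mirror of the segment-id
# rule above -- sequence A plus both surrounding specials get id 0, sequence B
# plus its trailing separator gets id 1.
def _token_type_ids_demo() -> list:
    cls_id, sep_id = 101, 102  # assumed ids, for illustration only
    ids_a, ids_b = [7, 8, 9], [4, 5]
    return [0] * len([cls_id] + ids_a + [sep_id]) + [1] * len(ids_b + [sep_id])  # 5 zeros then 3 ones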
| 543 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 143 | 0 |
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
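# Editor's example (assumed 3x3 cost grid): the DP mutates the grid in place, so
# the bottom-right cell ends up holding the cheapest top-left-to-bottom-right
# path cost when moving only right or down.
_example_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert min_path_sum(_example_grid) == 7  # path 1 -> 3 -> 1 -> 1 -> 1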
| 719 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 331 | 0 |
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide a number of bytes into the given number of inclusive, 1-based ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
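# Editor's example (illustrative sizes): 100 bytes over 4 partitions produces
# inclusive, 1-based ranges, with the last partition absorbing any remainder.
assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]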
| 417 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_00_00_00) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
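# Editor's check (derivation aid, not in the original): solving
# T_b = b*(b+1)/2 = target / T_a for b with the quadratic formula gives
# b = (-1 + sqrt(1 + 8*target/T_a)) / 2, the estimate floored/ceiled above.
def _estimate_b(target: float, triangle_a: float) -> float:
    return (-1 + sqrt(1 + 8 * target / triangle_a)) / 2


assert abs(_estimate_b(18, 3) - 3) < 1e-9  # T_3 = 6 and 3 * 6 = 18, so b == 3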
| 671 | 0 |
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 700 | from math import pi
def arc_length(radius: float, angle: float) -> float:
    """Length of a circular arc for the given radius and central angle in degrees."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10)) | 580 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (an expression in x) starting from the guess `a`."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
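# Editor's usage note (extra check in the same style as above, not from the
# original file): the expression is evaluated with eval, so it must be written
# in terms of the variable `x` used inside newton_raphson.
if __name__ == "__main__":
    print(f"sqrt(5) ~= {newton_raphson('x**2 - 5', 2)}")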
| 78 | def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(f"""{solution() = }""")
| 415 | 0 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2,
        intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 375 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p, goal):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p, goal):
    # integer division by the time variable t
    return consistent_heuristic(p, goal) // t


def heuristic_2(p, goal):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start, i, goal, g_function):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(
                                    neighbours, 0, goal, g_function
                                ):
                                    open_list[j].put(
                                        neighbours, key(neighbours, var, goal, g_function)
                                    )


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),
    (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start, goal, n_heuristic):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 375 | 1 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
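# Editor's example (illustrative tokens): "2 3 + 4 *" in postfix is (2 + 3) * 4.
assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20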
| 524 | import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
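# Editor's check (same regex as above): the splitter cuts at lower->upper and
# acronym->word boundaries, which is how TF/Flax class names map back to prefixes.
assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]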
def get_frameworks_table():
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to find the preprocessing class advertised for each model.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
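A short sketch of why defaultdict(bool) is used for the backend flags above: model types that were never seen simply read as False, so missing backends need no special-casing (the model-type keys here are just examples):

import collections

backend_flags = collections.defaultdict(bool)
backend_flags["bert"] = True
print(backend_flags["bert"], backend_flags["not-a-model"])  # True False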
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 524 | 1 |
import os


def largest_product(grid):
    """Return the greatest product of four adjacent numbers in `grid`
    (horizontally, vertically, or diagonally)."""
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    """Solve Project Euler problem 11 using the grid stored in grid.txt."""
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

        grid = [[int(cell) for cell in row] for row in grid]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
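A tiny worked check of largest_product (not the Euler grid; the values are chosen so the best run of four is the main diagonal, 1 * 2 * 3 * 4 = 24):

demo_grid = [
    [1, 0, 0, 0],
    [0, 2, 0, 0],
    [0, 0, 3, 0],
    [0, 0, 0, 4],
]
print(largest_product(demo_grid))  # 24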
| 719 |
""" T5 model configuration"""
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None,
        num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128,
        dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu",
        is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 460 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
def __A ( self : List[Any] ):
A_ = EsmModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Optional[int] ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : List[str] ):
A_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ = type
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : Any ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = EsmModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Optional[int] ):
A_ = self.model_tester.prepare_config_and_inputs()[0]
A_ = EsmEmbeddings(config=UpperCAmelCase )
A_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
A_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
A_ = create_position_ids_from_input_ids(UpperCAmelCase , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase , UpperCAmelCase ) ) )
def __A ( self : Union[str, Any] ):
A_ = self.model_tester.prepare_config_and_inputs()[0]
A_ = EsmEmbeddings(config=UpperCAmelCase )
A_ = torch.empty(2 , 4 , 30 )
A_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
A_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
A_ = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase , UpperCAmelCase ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def __A ( self : Tuple ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def __A ( self : Optional[int] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __A ( self : List[str] ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
"""simple docstring"""
@slow
def __A ( self : Dict ):
with torch.no_grad():
A_ = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
A_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = 33
A_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase )
A_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
@slow
def __A ( self : Tuple ):
with torch.no_grad():
A_ = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
A_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A_ = model(UpperCAmelCase )[0]
# compare the actual values for a slice.
A_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 ) ) | 86 |
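The position-id tests above exercise transformers.models.esm.modeling_esm.create_position_ids_from_input_ids. Here is a standalone re-implementation sketch of that RoBERTa-style helper, useful for checking the expected values by hand (this mirrors the library function; it is not the library import itself):

import torch

def create_position_ids_from_input_ids(input_ids, padding_idx):
    # Non-pad tokens get positions padding_idx + 1, padding_idx + 2, ...; pad tokens keep padding_idx.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

print(create_position_ids_from_input_ids(torch.tensor([[12, 31, 13, 1]]), padding_idx=1))
# tensor([[2, 3, 4, 1]])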
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: bubble the largest element to the end, then recurse.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
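A quick usage sketch (note the recursion can be up to len(list_data) - 1 levels deep in the worst case, so very long inputs may hit Python's recursion limit):

print(bubble_sort([5, 2, 9, 1, 5]))  # [1, 2, 5, 5, 9]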
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
def __init__( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str=13 , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : str=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Dict=99 , __UpperCAmelCase : Dict=[1, 1, 2] , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : str=4 , __UpperCAmelCase : Optional[int]=8 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : str="gelu_new" , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Any=512 , __UpperCAmelCase : Optional[Any]=3 , __UpperCAmelCase : List[Any]=0.02 , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : str=4 , __UpperCAmelCase : Any=None , __UpperCAmelCase : Tuple=False , ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = block_sizes
UpperCamelCase_ = num_decoder_layers
UpperCamelCase_ = d_model
UpperCamelCase_ = n_head
UpperCamelCase_ = d_head
UpperCamelCase_ = d_inner
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = activation_dropout
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = 2
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
UpperCamelCase_ = initializer_std
# Used in the tests to check the size of the first attention layer
UpperCamelCase_ = n_head
# Used in the tests to check the size of the first hidden state
UpperCamelCase_ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
UpperCamelCase_ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
UpperCamelCase_ = self.num_hidden_layers + 2
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = TFFunnelModel(config=__UpperCAmelCase )
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase_ = model(__UpperCAmelCase )
UpperCamelCase_ = [input_ids, input_mask]
UpperCamelCase_ = model(__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
UpperCamelCase_ = False
UpperCamelCase_ = TFFunnelModel(config=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
UpperCamelCase_ = False
UpperCamelCase_ = TFFunnelModel(config=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowercase__ ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , ) -> str:
"""simple docstring"""
UpperCamelCase_ = TFFunnelBaseModel(config=__UpperCAmelCase )
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase_ = model(__UpperCAmelCase )
UpperCamelCase_ = [input_ids, input_mask]
UpperCamelCase_ = model(__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
UpperCamelCase_ = False
UpperCamelCase_ = TFFunnelBaseModel(config=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
UpperCamelCase_ = False
UpperCamelCase_ = TFFunnelBaseModel(config=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowercase__ ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = TFFunnelForPreTraining(config=__UpperCAmelCase )
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = TFFunnelForMaskedLM(config=__UpperCAmelCase )
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = TFFunnelForSequenceClassification(config=__UpperCAmelCase )
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.num_choices
UpperCamelCase_ = TFFunnelForMultipleChoice(config=__UpperCAmelCase )
UpperCamelCase_ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = TFFunnelForTokenClassification(config=__UpperCAmelCase )
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = TFFunnelForQuestionAnswering(config=__UpperCAmelCase )
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = TFFunnelModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=__UpperCAmelCase )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = TFFunnelModelTester(self , base=__UpperCAmelCase )
UpperCamelCase_ = ConfigTester(self , config_class=__UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
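In the tester near the top of this file, the expected hidden-state count follows a simple formula: a base (encoder-only) Funnel exposes sum(block_sizes) layers, while the full model adds num_decoder_layers plus two extra hidden states (the input embeddings, and the upsampled encoder state summed with the first block's output). A quick check with the tester's defaults:

block_sizes, num_decoder_layers = [1, 1, 2], 1
print(sum(block_sizes))                           # 4 hidden layers for the base model
print(sum(block_sizes) + num_decoder_layers + 2)  # 7 expected hidden states for the full model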
| 704 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a : Any = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[str] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : int = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[Any] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 559 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size_divisor=32, do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
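The assertions above only check divisibility; GLPN's resize floors each side to the nearest multiple of size_divisor. A sketch of that rounding, assuming floor-to-multiple semantics:

def floor_to_multiple(x, divisor=32):
    return (x // divisor) * divisor

print(floor_to_multiple(400), floor_to_multiple(431))  # 384 416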
| 300 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
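A hypothetical round trip with a processor of this shape (the class identity matches Blip2Processor's declared attributes; the checkpoint id and image path below are placeholders, not verified):

# from PIL import Image
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
# print(inputs.keys())  # pixel_values plus the tokenizer outputs, merged via .update()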
| 472 | 0 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
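A hypothetical invocation of the script above (the file name and all paths are placeholders):

# python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bigbird_ckpt \
#     --big_bird_config_file ./bigbird_config.json \
#     --pytorch_dump_path ./bigbird_pytorch \
#     --is_trivia_qa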
| 356 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
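A small illustration of what atol=1e-3 tolerates in the assertions above:

import torch

a = torch.tensor([0.1218, -0.0803])
b = torch.tensor([0.1223, -0.0795])
print(torch.allclose(a, b, atol=1e-3))  # True: every |a - b| <= atol (plus a tiny rtol slack)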
| 356 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
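What the lazy wiring above buys (a sketch; assumes transformers is installed): importing the package is cheap, and the heavy torch/tf submodules load only on first attribute access.

# import transformers.models.rag as rag
# print(type(rag).__name__)         # _LazyModule
# tokenizer_cls = rag.RagTokenizer  # this line actually imports tokenization_rag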
| 198 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowercase = "CompVis/stable-diffusion-v1-1"
lowercase = "CompVis/stable-diffusion-v1-2"
lowercase = "CompVis/stable-diffusion-v1-3"
lowercase = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(
        self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler,
            safety_checker=safety_checker, feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def components(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def _UpperCamelCase ( self , a , a = 5_12 , a = 5_12 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> Optional[Any]:
return self.pipea(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
@torch.no_grad()
def _UpperCamelCase ( self , a , a = 5_12 , a = 5_12 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> int:
return self.pipea(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
@torch.no_grad()
def _UpperCamelCase ( self , a , a = 5_12 , a = 5_12 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> Optional[int]:
return self.pipea(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
@torch.no_grad()
def _UpperCamelCase ( self , a , a = 5_12 , a = 5_12 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> Any:
return self.pipea(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
@torch.no_grad()
def _UpperCamelCase ( self , a , a = 5_12 , a = 5_12 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> Optional[Any]:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
snake_case_ = self.textaimg_sda_a(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
# Get first result from Stable Diffusion Checkpoint v1.2
snake_case_ = self.textaimg_sda_a(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
# Get first result from Stable Diffusion Checkpoint v1.3
snake_case_ = self.textaimg_sda_a(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
# Get first result from Stable Diffusion Checkpoint v1.4
snake_case_ = self.textaimg_sda_a(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 198 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__(self , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=3 , lowerCAmelCase_=100 , lowerCAmelCase_=6 , lowerCAmelCase_=2048 , lowerCAmelCase_=8 , lowerCAmelCase_=6 , lowerCAmelCase_=2048 , lowerCAmelCase_=8 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=256 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1.0 , lowerCAmelCase_=False , lowerCAmelCase_="sine" , lowerCAmelCase_="resnet50" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=0.1 , **lowerCAmelCase_ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A_ : Dict = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(A__ , A__ ):
A_ : Any = backbone_config.get("""model_type""" )
A_ : int = CONFIG_MAPPING[backbone_model_type]
A_ : str = config_class.from_dict(A__ )
# set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
A_ : Optional[int] = use_timm_backbone
A_ : List[str] = backbone_config
A_ : Any = num_channels
A_ : Union[str, Any] = num_queries
A_ : Optional[int] = d_model
A_ : Union[str, Any] = encoder_ffn_dim
A_ : str = encoder_layers
A_ : List[Any] = encoder_attention_heads
A_ : str = decoder_ffn_dim
A_ : Any = decoder_layers
A_ : Tuple = decoder_attention_heads
A_ : List[Any] = dropout
A_ : int = attention_dropout
A_ : Any = activation_dropout
A_ : Tuple = activation_function
A_ : Tuple = init_std
A_ : Union[str, Any] = init_xavier_std
A_ : int = encoder_layerdrop
A_ : List[str] = decoder_layerdrop
A_ : Any = encoder_layers
A_ : List[str] = auxiliary_loss
A_ : List[str] = position_embedding_type
A_ : Tuple = backbone
A_ : List[Any] = use_pretrained_backbone
A_ : List[Any] = dilation
# Hungarian matcher
A_ : Tuple = class_cost
A_ : Union[str, Any] = bbox_cost
A_ : List[Any] = giou_cost
# Loss coefficients
A_ : Any = mask_loss_coefficient
A_ : int = dice_loss_coefficient
A_ : Union[str, Any] = bbox_loss_coefficient
A_ : str = giou_loss_coefficient
A_ : int = eos_coefficient
super().__init__(is_encoder_decoder=A__ , **A__ )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
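A minimal construction sketch for the config above (values are illustrative; assumes transformers is installed). The aliased names resolve through attribute_map, so hidden_size reads d_model and num_attention_heads reads encoder_attention_heads:

# config = TableTransformerConfig(num_queries=50, d_model=128)
# print(config.hidden_size, config.num_attention_heads)  # 128 8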
| 720 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_lowerCAmelCase = "."
if __name__ == "__main__":
_lowerCAmelCase = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
_lowerCAmelCase = []
_lowerCAmelCase = []
with open(doctest_file_path) as fp:
for line in fp:
_lowerCAmelCase = line.strip()
_lowerCAmelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_lowerCAmelCase = "\n".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 480 | 0 |