import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # Reference values computed with fairseq:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # Reference values computed with fairseq:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
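For context, the hard-coded ids in both tests encode the commented sentence; one plausible way to regenerate them is sketched below. The tokenizer class is an assumption inferred from the model name and is not part of the sample above.

# Hypothetical reproduction of the input ids (tokenizer choice is assumed):
# from transformers import XLMRobertaTokenizer
# tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
# input_ids = torch.tensor([tokenizer.encode("The dog is cute and lives in the garden house")])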
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
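A typical invocation of this launcher looks like the following; the training script name and its flags are illustrative, not taken from the sample.

# python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5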
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    expected_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], expected_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)


import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = "▁"
A_ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
A_ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
A_ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
A_ = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
A_ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __lowercase ( _A ):
lowercase = ["input_ids"]
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = RESOURCE_FILES_NAMES
def __init__( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=False , __lowerCamelCase : int="utf8" , __lowerCamelCase : Union[str, Any]="[UNK]" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : str="[CLS]" , __lowerCamelCase : List[Any]="[MASK]" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Optional[int] , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , vocab_file=__lowerCamelCase , encoding=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
lowercase = do_lower_case
lowercase = sentencepiece_model_ckpt
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase = self.load_vocab(filepath=__lowerCamelCase )
else:
lowercase = {self.sp_model.id_to_piece(__lowerCamelCase ): id for id in range(self.sp_model.get_piece_size() )}
lowercase = {v: k for k, v in self.vocab.items()}
def __a ( self : int , __lowerCamelCase : List[str] ) -> str:
'''simple docstring'''
if text is None:
return None
lowercase = self.tokenize(__lowerCamelCase )
lowercase ,lowercase = '''''', []
for i, ch in enumerate(__lowerCamelCase ):
if ch in self.SP_CHAR_MAPPING:
lowercase = self.SP_CHAR_MAPPING.get(__lowerCamelCase )
else:
lowercase = unicodedata.normalize('''NFKC''' , __lowerCamelCase )
if self.is_whitespace(__lowerCamelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__lowerCamelCase ) )
lowercase ,lowercase ,lowercase = normalized_text, [], 0
if self.do_lower_case:
lowercase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase = token[1:]
lowercase = text[offset:].index(__lowerCamelCase ) + offset
lowercase = start + len(__lowerCamelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase = end
return token_mapping
@property
def __a ( self : str ) -> Tuple:
'''simple docstring'''
return len(self.vocab )
def __a ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Tuple ) -> Dict:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self : int , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def __a ( self : Any , __lowerCamelCase : Dict ) -> Optional[int]:
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(__lowerCamelCase , __lowerCamelCase ) for c in text) )
def __a ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Optional[Any]=64 , __lowerCamelCase : Any=0.1 ) -> int:
'''simple docstring'''
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
lowercase = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
lowercase = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
lowercase = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
lowercase = self.sp_model.EncodeAsPieces(__lowerCamelCase )
else:
lowercase = self.sp_model.SampleEncodeAsPieces(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase = []
for pi, piece in enumerate(__lowerCamelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__lowerCamelCase ) and pi != 0:
new_pieces.append(__lowerCamelCase )
continue
else:
continue
lowercase = 0
for i, chunk in enumerate(__lowerCamelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__lowerCamelCase ) or self.is_punct(__lowerCamelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__lowerCamelCase )
lowercase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase = i
if len(__lowerCamelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def __a ( self : str , __lowerCamelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase = ''''''.join(__lowerCamelCase ).replace(__lowerCamelCase , ''' ''' ).strip()
return out_string
def __a ( self : List[str] , __lowerCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase = self.convert_ids_to_tokens(__lowerCamelCase )
lowercase = ''''''.join(__lowerCamelCase ).replace(__lowerCamelCase , ''' ''' ).strip()
return out_string
def __a ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
'''simple docstring'''
return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token ) )
def __a ( self : int , __lowerCamelCase : Optional[Any] ) -> int:
'''simple docstring'''
return self.reverse_vocab.get(__lowerCamelCase , self.unk_token )
def __a ( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=None ) -> str:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase = [self.cls_token_id]
lowercase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __a ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int=None ) -> Any:
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __a ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : int=None , __lowerCamelCase : Optional[int]=False ) -> Optional[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def __a ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__lowerCamelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__lowerCamelCase ) + 1) + [1] * (len(__lowerCamelCase ) + 3)
def __a ( self : Union[str, Any] , __lowerCamelCase : Dict ) -> str:
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __a ( self : Tuple , __lowerCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __a ( self : Dict , __lowerCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __a ( self : Optional[Any] , __lowerCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__lowerCamelCase ) == 1:
lowercase = unicodedata.category(__lowerCamelCase )
if cat == "Zs":
return True
return False
def __a ( self : Tuple , __lowerCamelCase : Optional[Any] ) -> Any:
'''simple docstring'''
lowercase = {}
with io.open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__lowerCamelCase ):
lowercase = line.rstrip('''\n''' )
lowercase = int(__lowerCamelCase )
return token_to_idx
def __a ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase = 0
if os.path.isdir(__lowerCamelCase ):
lowercase = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''' )
lowercase = token_index
writer.write(token + '''\n''' )
index += 1
lowercase = os.path.join(__lowerCamelCase , '''sentencepiece.bpe.model''' )
with open(__lowerCamelCase , '''wb''' ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (vocab_file,)
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained Reformer model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
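For reference, a conversion run would look something like the following; the script file name and all paths are illustrative assumptions, not taken from the sample.

# python convert_reformer_trax_checkpoint_to_pytorch.py \
#     --trax_model_pkl_path ./reformer_weights.pkl \
#     --config_file ./reformer_config.json \
#     --pytorch_dump_path ./pytorch_model.bin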
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
_a : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase_ )
_a : Optional[int] = FlaxAutoModelForSeqaSeqLM.from_config(config=UpperCamelCase_ )
_a : List[str] = checkpoints.load_tax_checkpoint(UpperCamelCase_ )
_a : str = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
_a : str = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_a : str = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : int = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
_a : int = f"""layers_{str(UpperCamelCase_ )}"""
# Self-Attention
_a : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
_a : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
_a : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
_a : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
_a : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
_a : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
_a : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
_a : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
_a : str = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
_a : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
_a : List[str] = flax_model.params['''encoder''']['''block'''][str(UpperCamelCase_ )]['''layer''']
_a : Optional[Any] = tax_attention_key
_a : List[str] = tax_attention_out
_a : Union[str, Any] = tax_attention_query
_a : Tuple = tax_attention_value
_a : int = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : str = tax_global_layer_norm
if split_mlp_wi:
_a : Tuple = tax_mlp_wi_a
_a : Optional[Any] = tax_mlp_wi_a
else:
_a : Tuple = tax_mlp_wi
_a : Dict = tax_mlp_wo
_a : Optional[Any] = tax_mlp_layer_norm
_a : Dict = flax_model_encoder_layer_block
# Only for layer 0:
_a : Dict = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
_a : Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : Union[str, Any] = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
_a : Union[str, Any] = tax_encoder_global_rel_embedding
# Assigning
_a : List[str] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
_a : Optional[int] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_a : int = f"""layers_{str(UpperCamelCase_ )}"""
# Self-Attention
_a : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
_a : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
_a : Any = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
_a : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
_a : int = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
_a : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
_a : Union[str, Any] = tax_enc_dec_attention_module['''key''']['''kernel''']
_a : List[Any] = tax_enc_dec_attention_module['''out''']['''kernel''']
_a : int = tax_enc_dec_attention_module['''query''']['''kernel''']
_a : Any = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
_a : int = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
_a : str = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
_a : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
_a : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
_a : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
_a : str = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
_a : List[Any] = flax_model.params['''decoder''']['''block'''][str(UpperCamelCase_ )]['''layer''']
_a : List[Any] = tax_attention_key
_a : List[Any] = tax_attention_out
_a : Optional[Any] = tax_attention_query
_a : List[Any] = tax_attention_value
_a : Optional[int] = tax_pre_attention_layer_norm
_a : List[Any] = tax_enc_dec_attention_key
_a : Any = tax_enc_dec_attention_out
_a : Tuple = tax_enc_dec_attention_query
_a : Any = tax_enc_dec_attention_value
_a : Optional[int] = tax_cross_layer_norm
if split_mlp_wi:
_a : Dict = tax_mlp_wi_a
_a : Union[str, Any] = tax_mlp_wi_a
else:
_a : Dict = tax_mlp_wi
_a : str = tax_mlp_wo
_a : int = txa_mlp_layer_norm
_a : int = flax_model_decoder_layer_block
# Decoder Normalization
_a : List[str] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
_a : int = txa_decoder_norm
# Only for layer 0:
_a : int = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
_a : Tuple = tax_decoder_rel_embedding
# Token Embeddings
_a : str = tax_model['''target''']['''token_embedder''']['''embedding''']
_a : Tuple = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_a : Optional[int] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(UpperCamelCase_ )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
import sys

N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f'''{solution() = }''')
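As a side note, the same scan can be phrased as a max over fixed-width windows; this compact variant is my own sketch (the function name, the width parameter, and the use of math.prod are not from the sample):

import math


def solution_windowed(n: str = N, width: int = 13) -> int:
    # take the best product over every contiguous window of `width` digits
    return max(math.prod(int(d) for d in n[i : i + width]) for i in range(len(n) - width + 1))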
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
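A pinned-versions table like this is normally consumed by a small lookup helper; below is a minimal sketch of the idea (the helper name is mine, not from the sample).

def pin_for(package: str) -> str:
    # look up the version specifier recorded for a package,
    # e.g. pin_for("torch") -> "torch>=1.9,!=1.12.0"
    return deps[package]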
def count_divisors(n):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangle number with more than five hundred divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
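A quick sanity check of the divisor-counting helper (the example values are mine): 28 = 2^2 * 7 factors into exponents 2 and 1, so it has (2 + 1) * (1 + 1) = 6 divisors.

assert count_divisors(28) == 6  # divisors: 1, 2, 4, 7, 14, 28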
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # merge two minterm strings if they differ in exactly one position
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    # repeatedly merge pairs until only prime implicants remain
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # pick essential prime implicants: columns covered by exactly one row
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining columns
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
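To make the merging step concrete, here is a tiny illustrative trace (my own example, not part of the sample): minterms 2 and 3 over three variables are '010' and '011'; they differ in one bit, so compare_string combines them.

# Illustrative only:
# compare_string("010", "011") -> "01_"
# compare_string("010", "101") -> False (differs in more than one position)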
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
lowercase__ : str = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ = text_generator('''This is a test''' , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
lowerCamelCase_ , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ = text_generator('''This is a test''' , do_sample=lowerCamelCase_ , num_return_sequences=2 , return_tensors=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
] , )
lowerCAmelCase__ = text_generator.model.config.eos_token_id
lowerCAmelCase__ = '''<pad>'''
lowerCAmelCase__ = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=lowerCamelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCamelCase_ , )
self.assertEqual(
lowerCamelCase_ , [
[
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
],
[
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
],
] , )
@require_tf
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ = text_generator('''This is a test''' , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = TextGenerationPipeline(model=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
return text_generator, ["This is a test", "Another test"]
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = '''Hello I believe in'''
lowerCAmelCase__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ = text_generator(lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ = text_generator(lowerCamelCase_ , stop_sequence=''' fe''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
lowerCAmelCase__ = text_generator.model
lowerCAmelCase__ = text_generator.tokenizer
lowerCAmelCase__ = text_generator('''This is a test''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ = text_generator('''This is a test''' , return_full_text=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ = pipeline(task='''text-generation''' , model=lowerCamelCase_ , tokenizer=lowerCamelCase_ , return_full_text=lowerCamelCase_ )
lowerCAmelCase__ = text_generator('''This is a test''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ = text_generator('''This is a test''' , return_full_text=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
] , )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = text_generator('''test''' , return_full_text=lowerCamelCase_ , return_text=lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = text_generator('''test''' , return_full_text=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = text_generator('''test''' , return_text=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ = text_generator('''''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )
lowerCAmelCase__ = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(lowerCamelCase_ ):
text_generator(
'''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
"""Tests for the generation stopping criteria."""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
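

# A minimal usage sketch (not part of the test suite; the model name and prompt are
# illustrative assumptions): combining criteria when calling `model.generate`.
# Generation stops as soon as any criterion in the list returns True.
#
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tokenizer("Hello", return_tensors="pt")
# criteria = StoppingCriteriaList(
#     [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
# )
# outputs = model.generate(**inputs, stopping_criteria=criteria)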
"""GPTBigCode model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
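

# A minimal usage sketch (illustrative; not part of the original file). `attribute_map`
# lets the generic Transformers attribute names resolve to the GPT-style names above.
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
    print(config.num_hidden_layers)  # -> 2, alias for `n_layer`
    print(config.hidden_size)  # -> 128, alias for `n_embd`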
"""Benchmark several ways of testing whether a string is a palindrome."""
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
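
    # Why the ranking looks like this: `s == s[::-1]` does both the reversal and the
    # comparison in C, while the loop, generator, and recursive variants pay per-character
    # Python interpreter overhead (the recursive one also allocates a new slice per call).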
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
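
# A possible extension (not in the original script; names reuse the variables above):
# also assert the reduced value, since all-reducing ones across ranks must sum to the
# world size.
#
# t = torch.ones(1).to(device)
# dist.all_reduce(t, op=dist.ReduceOp.SUM)
# assert t.item() == world_size, f"all_reduce mismatch: {t.item()} != {world_size}"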
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""
Count the "triangle words" in words.txt, i.e. words whose letter-value sum
(A=1, ..., Z=26) is a triangular number t_n = n * (n + 1) / 2.
"""
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
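

# Worked example (from the problem statement, not the original file): "SKY" has value
# 19 + 11 + 25 = 55 = t_10 = 10 * 11 / 2, so "SKY" counts as a triangle word.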
if __name__ == "__main__":
    print(solution())
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
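
    # Example invocation (paths and the script filename are placeholders):
    # python convert_yoso_checkpoint.py \
    #     --pytorch_model_path ./yoso_checkpoint.bin \
    #     --config_file ./yoso_config.json \
    #     --pytorch_dump_path ./yoso_converted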
"""Convert a TensorFlow 2.x BERT checkpoint into a PyTorch model."""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
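
    # Example invocation (paths and the script filename are placeholders):
    # python convert_bert_tf2_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
    #     --bert_config_file ./bert_config.json \
    #     --pytorch_dump_path ./pytorch_model.bin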
"""Dummy objects used when `torch` and `torchsde` are not available."""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
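

# Behavior sketch (illustrative, not part of the original file): any attempt to use the
# dummy class raises an ImportError asking the user to install the missing backends.
#
# scheduler = DPMSolverSDEScheduler()  # raises ImportError if torch/torchsde are absent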
"""Cross-process tests for `accelerate.utils.operations`."""
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to make sure padding actually happens
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now, this test only runs on exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now, this test only runs on exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
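
# To exercise these checks across processes (the command and filename are illustrative):
#   accelerate launch --num_processes 2 test_ops.py
# A single-process `python test_ops.py` run also works, but the reduce tests return
# early because they expect exactly two processes.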
"""Learning-rate scheduler wrapper used by the Accelerator."""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually
    took a training step, to avoid advancing the scheduler when a step was skipped (e.g. because
    of gradient overflow in mixed precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
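

# A minimal usage sketch (illustrative; follows the standard `accelerate` workflow).
# `Accelerator.prepare` wraps a torch scheduler in `AcceleratedScheduler` automatically,
# so user code rarely constructs this class directly.
#
# from accelerate import Accelerator
# import torch
#
# accelerator = Accelerator()
# model = torch.nn.Linear(4, 4)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
# model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# # In the training loop: optimizer.step() then scheduler.step(); the wrapper makes
# # sure the scheduler only advances when the optimizer actually stepped.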
def solution(n: int = 100) -> int:
    """Count the distinct terms in the sequence a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
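

# Sanity check against the Project Euler 29 statement: for 2 <= a, b <= 5 there are
# exactly 15 distinct terms, so solution(5) must return 15.
assert solution(5) == 15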
if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list `a` of integers in place."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
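
# Complexity note: with n elements spanning a value range of size k, pigeonhole sort
# runs in O(n + k) time and uses O(k) extra space, so it only pays off when the value
# range is comparable to the number of elements.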
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
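

# Worked example (computed from the function above, not from the original file):
# downscale_height_and_width(512, 512) -> (64, 64), i.e. the latent grid for a 512x512
# image with the default movq scale factor of 8; non-multiples round up, e.g.
# downscale_height_and_width(520, 520) -> (72, 72).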
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import RegNetForImageClassification, RegNetModel
    from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Tuple = [*signature.parameters.keys()]
__lowerCAmelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(config=_SCREAMING_SNAKE_CASE )
for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __lowerCamelCase ( self ):
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase : List[str] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : List[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowerCAmelCase : List[Any] = layer_type
__lowerCAmelCase : Tuple = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase : Dict = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = RegNetModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
__lowerCAmelCase : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class RegNetModelIntegrationTest( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = self.default_image_processor
__lowerCAmelCase : Any = prepare_img()
__lowerCAmelCase : Any = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
__lowerCAmelCase : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
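

# --- Inference sketch (added; mirrors the slow integration test above) ---
# Runs the released RegNet checkpoint outside the test harness. It needs
# network access and relies on the same names this test file already imports,
# so treat it as an illustrative demo rather than part of the test suite.
if __name__ == "__main__":
    image_processor = AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    inputs = image_processor(images=prepare_img() , return_tensors='pt' )
    with torch.no_grad():
        print(model(**inputs ).logits.argmax(-1 ).item() )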
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_attention_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_choices
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_attention_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def snake_case_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = True
UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : str = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRobertaModelTester(self )
@slow
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
            UpperCAmelCase = model_class_name.from_pretrained('roberta-base' , from_pt=True )
UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(a_ )
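

# --- Usage sketch (added; not part of the test suite) ---
# A minimal forward pass mirroring the slow test above. It downloads the
# "roberta-base" checkpoint, so it only runs when this file is executed
# directly.
if __name__ == "__main__":
    if is_flax_available():
        model = FlaxRobertaModel.from_pretrained('roberta-base' , from_pt=True )
        outputs = model(np.ones((1, 1) , dtype='i4' ) )
        print(outputs.last_hidden_state.shape )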
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1_024}
class BartphoTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs=None , **kwargs , ) -> None:
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , 'r' , encoding='utf-8' ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.fairseq_ids_to_tokens )
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F'''{str(token )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
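

# --- Usage sketch (added; not part of the original module) ---
# Encoding one sentence with the tokenizer above. The checkpoint id comes from
# PRETRAINED_VOCAB_FILES_MAP; downloading it requires network access and the
# `sentencepiece` dependency, so treat this as an assumption-laden demo.
if __name__ == "__main__":
    tokenizer = BartphoTokenizer.from_pretrained('vinai/bartpho-syllable' )
    print(tokenizer('Chúng tôi là những nghiên cứu viên.' )['input_ids'] )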
'''simple docstring'''
import os
import pytest
from attr import dataclass
__A = '''us-east-1''' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
    hyperparameters = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 500,
"""save_steps""": 5500,
}
    distributed_hyperparameters = {**hyperparameters, """max_steps""": 1000}
@property
    def metric_definitions(self):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self):
"""simple docstring"""
return F"""{self.framework}-transfromers-test"""
@property
    def test_path(self):
"""simple docstring"""
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri(self):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request ):
    """simple docstring"""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
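

# --- Usage sketch (added) ---
# How a test module consumes the class-scoped fixture above: `sm_env` attaches
# the environment to `request.cls.env`. The test class below is a hypothetical
# example, not part of the original conftest.
@pytest.mark.usefixtures('sm_env' )
class TestEnvironmentDefaults:
    framework = 'pytorch'

    def test_default_hyperparameters(self ):
        assert self.env.hyperparameters['task_name'] == 'mnli'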
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class a_ :
_snake_case = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_snake_case = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class a_ :
_snake_case = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_snake_case = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        results.update(result )
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
    return results
def _mp_fn(index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin ):
    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , FutureWarning , )
'''simple docstring'''
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str , to_type: str , value: float ) -> float:
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
            f'Valid values are: {", ".join(ENERGY_CONVERSION )}'
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
    doctest.testmod()
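

# Worked example (added): 1 kilowatt-hour is 3.6 megajoules, and identical
# units must round-trip a value unchanged.
if __name__ == "__main__":
    assert energy_conversion("kilowatthour", "megajoule", 1) == 3.6
    assert energy_conversion("joule", "wattsecond", 100) == 100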
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
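

# --- Usage sketch (added; not part of the pipeline module) ---
# Drives the pipeline above end to end. "google/ddpm-cifar10-32" is a real
# unconditional UNet checkpoint, but pairing it with this local class (instead
# of diffusers' shipped DDIMPipeline) is an illustrative assumption; it needs
# network access and only runs when executed directly.
if __name__ == "__main__":
    from diffusers import UNet2DModel

    unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32" )
    pipe = DDIMPipeline(unet , DDIMScheduler() )
    image = pipe(batch_size=1 , num_inference_steps=10 ).images[0]
    image.save("ddim_sample.png" )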
# using dfs for finding eulerian path traversal
def dfs(u , graph , visited_edge , path=None ):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v , graph , visited_edge , path )
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph , max_node ):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node ):
        if i not in graph.keys():
            continue
        if len(graph[i] ) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph , max_node ):
    visited_edge = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
    check, odd_node = check_circuit_or_path(graph , max_node )
    if check == 3:
        print("""graph is not Eulerian""" )
        print("""no path""" )
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("""graph has a Euler path""" )
    if check == 1:
        print("""graph has a Euler cycle""" )
    path = dfs(start_node , graph , visited_edge )
    print(path )


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1 , max_node )
    check_euler(g2 , max_node )
    check_euler(g3 , max_node )
    check_euler(g4 , max_node )
    check_euler(g5 , max_node )
if __name__ == "__main__":
main()
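

# Worked check (added): in the triangle graph every vertex has even degree, so
# check_circuit_or_path reports an Euler cycle (status 1) with no odd-degree
# start vertex (-1).
if __name__ == "__main__":
    assert check_circuit_or_path({1: [2, 3], 2: [1, 3], 3: [1, 2]} , 10 ) == (1, -1)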
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
SCREAMING_SNAKE_CASE_ = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = '''albert'''
    def __init__( self , vocab_size=30000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
_snake_case : Optional[Any] = vocab_size
_snake_case : int = embedding_size
_snake_case : List[str] = hidden_size
_snake_case : Union[str, Any] = num_hidden_layers
_snake_case : Optional[Any] = num_hidden_groups
_snake_case : Tuple = num_attention_heads
_snake_case : Any = inner_group_num
_snake_case : Union[str, Any] = hidden_act
_snake_case : Optional[int] = intermediate_size
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : List[Any] = max_position_embeddings
_snake_case : Dict = type_vocab_size
_snake_case : Dict = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : str = classifier_dropout_prob
_snake_case : Union[str, Any] = position_embedding_type
class AlbertOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
"""simple docstring"""
if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
def __init__( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : Optional[int]=1_3 , __snake_case : List[str]=3_2 , __snake_case : Optional[int]=3 , __snake_case : str=4 , __snake_case : Optional[int]=[1_0, 2_0, 3_0, 4_0] , __snake_case : Optional[int]=[2, 2, 3, 2] , __snake_case : Optional[int]=True , __snake_case : Union[str, Any]=True , __snake_case : Dict=3_7 , __snake_case : Any="gelu" , __snake_case : int=1_0 , __snake_case : Optional[int]=0.02 , __snake_case : Union[str, Any]=["stage2", "stage3", "stage4"] , __snake_case : Optional[Any]=[2, 3, 4] , __snake_case : str=None , ) -> List[Any]:
__magic_name__: int = parent
__magic_name__: int = batch_size
__magic_name__: Union[str, Any] = image_size
__magic_name__: Dict = num_channels
__magic_name__: List[Any] = num_stages
__magic_name__: Dict = hidden_sizes
__magic_name__: Optional[Any] = depths
__magic_name__: Dict = is_training
__magic_name__: Union[str, Any] = use_labels
__magic_name__: str = intermediate_size
__magic_name__: List[str] = hidden_act
__magic_name__: Tuple = num_labels
__magic_name__: Any = initializer_range
__magic_name__: Optional[int] = out_features
__magic_name__: Optional[Any] = out_indices
__magic_name__: Optional[int] = scope
def lowerCamelCase__ ( self : str ) -> Tuple:
__magic_name__: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__: Any = None
if self.use_labels:
__magic_name__: Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__: Any = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : int ) -> str:
return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : int ) -> Dict:
__magic_name__: Optional[int] = ConvNextVaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: Union[str, Any] = model(__snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCamelCase__ ( self : Any , __snake_case : Dict , __snake_case : Tuple , __snake_case : Optional[int] ) -> str:
__magic_name__: int = ConvNextVaForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: Optional[Any] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] ) -> List[str]:
__magic_name__: Optional[int] = ConvNextVaBackbone(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: Tuple = model(__snake_case )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__magic_name__: Union[str, Any] = None
__magic_name__: str = ConvNextVaBackbone(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: Dict = model(__snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
__magic_name__: Optional[int] = self.prepare_config_and_inputs()
__magic_name__, __magic_name__, __magic_name__: str = config_and_inputs
__magic_name__: str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCamelCase__ ( self : Tuple ) -> Dict:
__magic_name__: str = self.prepare_config_and_inputs()
__magic_name__, __magic_name__, __magic_name__: Union[str, Any] = config_and_inputs
__magic_name__: List[Any] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
    def setUp( self ):
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCamelCase__ ( self : int ) -> List[str]:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
pass
def lowerCamelCase__ ( self : Tuple ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__magic_name__, __magic_name__: str = self.model_tester.prepare_config_and_inputs_with_labels()
__magic_name__: List[str] = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
continue
__magic_name__: int = model_class(__snake_case )
model.to(__snake_case )
model.train()
__magic_name__: str = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
__magic_name__: Optional[Any] = model(**__snake_case ).loss
loss.backward()
def lowerCamelCase__ ( self : int ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__magic_name__, __magic_name__: Optional[int] = self.model_tester.prepare_config_and_inputs_with_labels()
__magic_name__: Any = False
__magic_name__: int = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
continue
__magic_name__: Optional[Any] = model_class(__snake_case )
model.to(__snake_case )
model.gradient_checkpointing_enable()
model.train()
__magic_name__: int = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
__magic_name__: List[Any] = model(**__snake_case ).loss
loss.backward()
def lowerCamelCase__ ( self : Dict ) -> Tuple:
__magic_name__, __magic_name__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__: Dict = model_class(__snake_case )
__magic_name__: Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__: Dict = [*signature.parameters.keys()]
__magic_name__: List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCamelCase__ ( self : List[Any] ) -> Any:
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int] ):
__magic_name__: Optional[Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
__magic_name__: List[Any] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
__magic_name__: List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__magic_name__: int = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__magic_name__, __magic_name__: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__: List[Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__: Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__: List[Any] = ConvNextVaModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_img():
__magic_name__: int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : int ) -> Dict:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : int ) -> int:
__magic_name__: Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(__snake_case )
__magic_name__: Any = self.default_image_processor
__magic_name__: str = prepare_img()
__magic_name__: List[str] = preprocessor(images=__snake_case , return_tensors="""pt""" ).to(__snake_case )
# forward pass
with torch.no_grad():
__magic_name__: int = model(**__snake_case )
# verify the logits
__magic_name__: Any = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __snake_case )
__magic_name__: Union[str, Any] = torch.tensor([0.9996, 0.1966, -0.4386] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1E-4 ) )
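

# --- Inference sketch (added; mirrors the slow integration test above) ---
# Runs the released checkpoint outside the test harness. Needs network access;
# note that `ConvNextVaForImageClassification` is this file's (mangled) alias
# for upstream ConvNextV2ForImageClassification, so this is illustrative only.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" )
    model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" )
    inputs = processor(images=prepare_img() , return_tensors="""pt""" )
    with torch.no_grad():
        print(model(**inputs ).logits.argmax(-1 ).item() )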
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig( PretrainedConfig ):
UpperCAmelCase__ = "dpr"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
__magic_name__: int = vocab_size
__magic_name__: Tuple = hidden_size
__magic_name__: Optional[Any] = num_hidden_layers
__magic_name__: Dict = num_attention_heads
__magic_name__: Any = hidden_act
__magic_name__: Tuple = intermediate_size
__magic_name__: int = hidden_dropout_prob
__magic_name__: Any = attention_probs_dropout_prob
__magic_name__: Any = max_position_embeddings
__magic_name__: Optional[Any] = type_vocab_size
__magic_name__: Optional[Any] = initializer_range
__magic_name__: Optional[Any] = layer_norm_eps
__magic_name__: int = projection_dim
__magic_name__: Optional[int] = position_embedding_type
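

# Quick smoke test (added; illustrative): DPRConfig's defaults mirror
# BERT-base, so constructing it with no arguments and reading `model_type`
# is a cheap sanity check.
if __name__ == "__main__":
    print(DPRConfig().model_type )  # expected: dpr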
'''simple docstring'''
from math import pi
def arc_length(angle: int , radius: int ) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
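

# Worked check (added): a 90° arc of a radius-10 circle is a quarter of the
# circumference, 2·π·10/4 = 5π ≈ 15.708, matching the print above.
if __name__ == "__main__":
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9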
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor ):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config )
    print(F'''length of state dict: {len(state_dict.keys() )}''' )
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , "w" ) as f:
        json.dump(config , f )
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
    state_dict = model
    hf_value_function = UNet1DModel(**config )
    print(F'''length of state dict: {len(state_dict.keys() )}''' )
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
    with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
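

# --- Round-trip sketch (added; not part of the original script) ---
# Rebuilds the value-function model from the config this script just wrote and
# loads the converted weights back in, as a quick check that the conversion
# produced a loadable checkpoint.
def reload_value_function():
    with open("hub/hopper-medium-v2/value_function/config.json" ) as f:
        config = json.load(f )
    net = UNet1DModel(**config )
    net.load_state_dict(torch.load("hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" ) )
    return net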
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int ) -> int:
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
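

# Worked examples (added): 5 and 7 are twin primes, so twin_prime(5) == 7;
# 4 is not prime, so twin_prime(4) == -1.
if __name__ == "__main__":
    assert twin_prime(5) == 7
    assert twin_prime(4) == -1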
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_=() , snake_case_=None , snake_case_="no" , snake_case_="29500" ):
_lowercase = False
_lowercase = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
_lowercase = True
elif "IPython" in sys.modules:
_lowercase = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
_lowercase = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , snake_case_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
_lowercase = 8
_lowercase = PrepareForLaunch(snake_case_ , distributed_type="""TPU""" )
print(F"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(snake_case_ , args=snake_case_ , nprocs=snake_case_ , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*snake_case_ )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
print(F"""Launching training on {num_processes} GPUs.""" )
try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                # Assumption: mirrors accelerate's MPS fallback toggle.
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
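# Usage sketch (assumes a user-defined `training_function(config)` running inside a
# notebook; argument values are illustrative only):
#
#     config = {"lr": 3e-4}
#     notebook_launcher(training_function, args=(config,), num_processes=2, mixed_precision="fp16")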
| 572 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt-neox-20b': 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            # Rebuild the pre-tokenizer so it honors the requested `add_prefix_space`.
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
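# Usage sketch (the model id appears in the pretrained map above):
#
#     tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     tok("Hello world")["input_ids"]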
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'


def clean_doc_toc(doc_list):
    """Cleans a table-of-content list by removing duplicates and sorting entries alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.')
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
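# Typical invocation (flag matches the argparse definition above; script path assumed):
#
#     python utils/check_doc_toc.py --fix_and_overwrite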
| 459 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a pair of `DataLoader`s for the GLUE MRPC dataset using "bert-base-cased"."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"])
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
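# Typical launch sketch (illustrative command; flag values are examples only):
#
#     accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 4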
| 258 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
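# Lazy-import behavior sketch: with `_import_structure` registered above, the heavy
# submodules are only imported on first attribute access, e.g. (illustrative):
#
#     from transformers.models.ctrl import CTRLModel  # triggers the modeling_ctrl import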
| 258 | 1 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase = "isbn/0140328726" ):
snake_case__ = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
snake_case__ = F"""{olid} is not a valid Open Library olid"""
raise ValueError(__lowerCAmelCase )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary as a dict."""
    desired_keys = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print('''\n'''.join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 276 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """
    Expected number of distinct colours when `num_picked` balls are drawn from an urn
    of 70 balls (10 of each of 7 colours). By linearity of expectation this equals
    NUM_COLOURS * (1 - C(60, num_picked) / C(70, num_picked)).
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
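    # Sanity-check sketch: the expected count of colours must lie strictly between
    # 0 and NUM_COLOURS (the published answer is approximately 6.818741802).
    assert 0.0 < float(solution(20)) < NUM_COLOURS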
| 276 | 1 |
class Graph:
    def __init__(self):
        # dictionary mapping each vertex to its list of adjacent vertices
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 73 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 73 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 306 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
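# Standalone sketch of the denoising loop exercised by these tests (tensor shape and
# the zero "model output" are illustrative assumptions, not part of the test suite):
#
#     import torch
#     from diffusers import KDPM2DiscreteScheduler
#
#     sched = KDPM2DiscreteScheduler(num_train_timesteps=1100)
#     sched.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * sched.init_noise_sigma
#     for t in sched.timesteps:
#         scaled = sched.scale_model_input(sample, t)
#         model_output = torch.zeros_like(scaled)  # stand-in for a denoiser network
#         sample = sched.step(model_output, t, scaled).prev_sample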
| 311 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
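# Construction sketch (values are the defaults defined above):
#
#     config = InstructBlipConfig()
#     config.vision_config.hidden_size    # 1408
#     config.qformer_config.vocab_size    # 30522
#     config.num_query_tokens             # 32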
| 709 |
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """
    Function implements the Adler-32 hash.
    Iterates and evaluates a new value for each character.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
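# Usage check (the expected value is the widely cited Adler-32 example for
# "Wikipedia"; treat it as an assumption worth re-verifying):
#
#     assert adler32("Wikipedia") == 300286872  # 0x11E60398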
| 292 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding"
            " to this function.")

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1)
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True)
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
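# Usage sketch (values follow from the defaults defined above):
#
#     config = SwinConfig()
#     config.hidden_size   # 768 == 96 * 2**(4 - 1)
#     config.stage_names   # ["stem", "stage1", "stage2", "stage3", "stage4"]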
| 715 |
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')
def __str__( self : Any ):
"""simple docstring"""
return str(self.k )
    def detect(self, img_path: str):
        """
        Returns the image with corners identified and the list of corner positions.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 170 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for the Mersenne number 2**p - 1; the test is
    meaningful when the exponent p is itself prime (p == 2 is special-cased).
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
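
# Search sketch (not part of the original file): for prime exponents the test
# above is exact, so scanning small p is expected to reproduce the Mersenne
# prime exponents below 100: 2, 3, 5, 7, 13, 17, 19, 31, 61, 89.
def mersenne_exponents(limit: int = 100) -> list:
    """Exponents p below `limit` for which 2**p - 1 passes the test above."""
    return [p for p in range(2, limit) if lucas_lehmer_test(p)]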
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 685 |
"""simple docstring"""
from timeit import timeit
def UpperCAmelCase ( snake_case : int ):
if number < 0:
raise ValueError('''the value of input must not be negative''' )
_lowerCAmelCase:str = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase ( snake_case : int ):
if number < 0:
raise ValueError('''the value of input must not be negative''' )
_lowerCAmelCase:Optional[Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase ( ):
def do_benchmark(snake_case : int ) -> None:
_lowerCAmelCase:Optional[int] = '''import __main__ as z'''
print(F'Benchmark when {number = }:' )
print(F'{get_set_bits_count_using_modulo_operator(snake_case ) = }' )
_lowerCAmelCase:List[Any] = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=snake_case )
print(F'timeit() runs in {timing} seconds' )
print(F'{get_set_bits_count_using_brian_kernighans_algorithm(snake_case ) = }' )
_lowerCAmelCase:List[str] = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=snake_case , )
print(F'timeit() runs in {timing} seconds' )
for number in (25, 37, 58, 0):
do_benchmark(snake_case )
print()
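
# Cross-check sketch (not part of the original file): both routines should
# agree with Python's own popcount, bin(n).count("1"), for every input.
def _check_popcount(upper: int = 1000) -> bool:
    return all(
        get_set_bits_count_using_brian_kernighans_algorithm(n)
        == get_set_bits_count_using_modulo_operator(n)
        == bin(n).count("1")
        for n in range(upper)
    )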
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 227 | 0 |
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative ints as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
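
# Sanity sketch (not part of the original file): the result should always
# match Python's built-in | operator rendered through bin().
def _check_binary_or() -> None:
    for a, b in [(25, 32), (0, 0), (7, 8)]:
        assert binary_or(a, b) == bin(a | b)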
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 721 |
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
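
# Invocation sketch (hypothetical file names, not part of the original
# script): fire.Fire below exposes the function as a CLI, so a typical
# call would look like
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json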
if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 649 | 0 |
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans a table-of-content section by removing duplicates and sorting entries
    alphabetically, keeping the "Overview" page first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__A = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 59 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.1_5},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
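
# Behaviour sketch (not part of the original script): BPE continuation
# markers become word-final markers while the special tokens are preserved.
def _demo_rewrite_dict_keys() -> None:
    src = {"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    out = rewrite_dict_keys(src)
    assert out == {"le": 5, "tt": 6, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}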
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_dir = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_dir, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(fsmt_folder_path)
    model_dir = basename(fsmt_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 407 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
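
# Example subclass (hypothetical, not part of the original file; the names
# `BaseCLICommand`, `register_subcommand` and `run` are reconstructed above):
# a concrete command registers its argparse sub-parser and implements run().
class EchoCommand(BaseCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_subparsers().add_parser("echo")
        echo_parser.add_argument("text", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)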
| 700 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
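
# Shape sketch (not part of the test file): with pad_token_id == 1, the helper
# above zeroes out pad positions in the default attention mask. The config is
# faked with SimpleNamespace purely for illustration.
def _demo_prepare_led_inputs() -> None:
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        pad_token_id=1,
        encoder_layers=2,
        encoder_attention_heads=4,
        decoder_layers=2,
        decoder_attention_heads=4,
    )
    ids = tf.constant([[0, 5, 6, 1, 1]])
    batch = prepare_led_inputs_dict(cfg, ids, ids)
    assert batch["attention_mask"].numpy().tolist() == [[1, 1, 1, 0, 0]]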
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int64)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 528 | 0 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
lowerCamelCase__ : Dict = 1_00
lowerCamelCase__ : Optional[Any] = set(range(3, NUM_PRIMES, 2))
primes.add(2)
lowerCamelCase__ : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def UpperCamelCase ( _lowerCAmelCase : int ) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_UpperCAmelCase : set[int] = set()
_UpperCAmelCase : int
_UpperCAmelCase : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def UpperCamelCase ( _lowerCAmelCase : int = 5000 ) -> int | None:
for number_to_partition in range(1, _lowerCAmelCase ):
if len(partition(_lowerCAmelCase ) ) > number_unique_partitions:
return number_to_partition
return None
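
# Sanity sketch (not part of the original file): 5 can be written as 5 or as
# 2 + 3, so partition(5) yields the products {5, 6}; solution(n) then scans
# for the first integer whose product set has more than n elements.
assert partition(5) == {5, 6}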
if __name__ == "__main__":
print(F'''{solution() = }''')
| 238 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    do_lower_case: Optional[bool] = field(
        default=None, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"}
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_UpperCAmelCase : Union[str, Any] = load_dataset(
"""xnli""", model_args.language, split="""train""", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
_UpperCAmelCase : Tuple = load_dataset(
"""xnli""", model_args.train_language, split="""train""", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : Dict = train_dataset.features["""label"""].names
if training_args.do_eval:
_UpperCAmelCase : Any = load_dataset(
"""xnli""", model_args.language, split="""validation""", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : Union[str, Any] = eval_dataset.features["""label"""].names
if training_args.do_predict:
_UpperCAmelCase : Tuple = load_dataset(
"""xnli""", model_args.language, split="""test""", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : str = predict_dataset.features["""label"""].names
# Labels
_UpperCAmelCase : str = len(_lowerCAmelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="""xnli""", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=_lowerCAmelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase : Union[str, Any] = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase : List[Any] = False
def preprocess_function(_lowerCAmelCase : Dict ):
# Tokenize the texts
return tokenizer(
examples["""premise"""], examples["""hypothesis"""], padding=_lowerCAmelCase, max_length=data_args.max_seq_length, truncation=_lowerCAmelCase, )
if training_args.do_train:
if data_args.max_train_samples is not None:
_UpperCAmelCase : Tuple = min(len(_lowerCAmelCase ), data_args.max_train_samples )
_UpperCAmelCase : Optional[int] = train_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
_lowerCAmelCase, batched=_lowerCAmelCase, load_from_cache_file=not data_args.overwrite_cache, desc="""Running tokenizer on train dataset""", )
# Log a few random samples from the training set:
for index in random.sample(range(len(_lowerCAmelCase ) ), 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Tuple = min(len(_lowerCAmelCase ), data_args.max_eval_samples )
_UpperCAmelCase : int = eval_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
_UpperCAmelCase : int = eval_dataset.map(
_lowerCAmelCase, batched=_lowerCAmelCase, load_from_cache_file=not data_args.overwrite_cache, desc="""Running tokenizer on validation dataset""", )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_UpperCAmelCase : List[Any] = min(len(_lowerCAmelCase ), data_args.max_predict_samples )
_UpperCAmelCase : Dict = predict_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
_UpperCAmelCase : Any = predict_dataset.map(
_lowerCAmelCase, batched=_lowerCAmelCase, load_from_cache_file=not data_args.overwrite_cache, desc="""Running tokenizer on prediction dataset""", )
# Get the metric function
_UpperCAmelCase : Dict = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowerCAmelCase : EvalPrediction ):
_UpperCAmelCase : Union[str, Any] = p.predictions[0] if isinstance(p.predictions, _lowerCAmelCase ) else p.predictions
_UpperCAmelCase : Tuple = np.argmax(_lowerCAmelCase, axis=1 )
return metric.compute(predictions=_lowerCAmelCase, references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase : Tuple = default_data_collator
    elif training_args.fp16:
_UpperCAmelCase : Optional[Any] = DataCollatorWithPadding(_lowerCAmelCase, pad_to_multiple_of=8 )
else:
_UpperCAmelCase : str = None
# Initialize our Trainer
_UpperCAmelCase : List[Any] = Trainer(
model=_lowerCAmelCase, args=_lowerCAmelCase, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=_lowerCAmelCase, tokenizer=_lowerCAmelCase, data_collator=_lowerCAmelCase, )
# Training
if training_args.do_train:
_UpperCAmelCase : Tuple = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : Union[str, Any] = last_checkpoint
_UpperCAmelCase : Optional[int] = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = train_result.metrics
_UpperCAmelCase : Any = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase )
)
_UpperCAmelCase : int = min(_lowerCAmelCase, len(_lowerCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""", _lowerCAmelCase )
trainer.save_metrics("""train""", _lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCAmelCase : List[str] = trainer.evaluate(eval_dataset=_lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = min(_lowerCAmelCase, len(_lowerCAmelCase ) )
trainer.log_metrics("""eval""", _lowerCAmelCase )
trainer.save_metrics("""eval""", _lowerCAmelCase )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = trainer.predict(_lowerCAmelCase, metric_key_prefix="""predict""" )
_UpperCAmelCase : List[str] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_lowerCAmelCase )
)
_UpperCAmelCase : Tuple = min(_lowerCAmelCase, len(_lowerCAmelCase ) )
trainer.log_metrics("""predict""", _lowerCAmelCase )
trainer.save_metrics("""predict""", _lowerCAmelCase )
_UpperCAmelCase : str = np.argmax(_lowerCAmelCase, axis=1 )
_UpperCAmelCase : Any = os.path.join(training_args.output_dir, """predictions.txt""" )
if trainer.is_world_process_zero():
with open(_lowerCAmelCase, """w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(_lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
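
    # Invocation sketch (hypothetical arguments, not part of the original
    # script): the script is driven entirely by HfArgumentParser flags, e.g.
    #   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
    #       --language de --train_language en --do_train --do_eval \
    #       --output_dir /tmp/debug_xnli --per_device_train_batch_size 32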
if __name__ == "__main__":
main()
| 238 | 1 |
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, filling self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source vertex to target_vertex as a '->' separated string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
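
# Expected behaviour (derived from the adjacency list above, not part of the
# original file): with source "G", BFS assigns the parent chain G -> C -> A
# -> B -> D, so shortest_path("D") returns "G->C->A->B->D" and
# shortest_path("G") returns just "G"; unreachable vertices such as "Foo"
# raise ValueError.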
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 211 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_UpperCamelCase = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_UpperCamelCase = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_UpperCamelCase = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 211 | 1 |
"""simple docstring"""
from math import ceil
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Any:
"""simple docstring"""
lowerCAmelCase_ : List[str] = list(range(0 , _UpperCAmelCase ) )
lowerCAmelCase_ : Optional[int] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
lowerCAmelCase_ : int = []
for i in device_map_blocks:
if device_map_blocks.count(_UpperCAmelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_UpperCAmelCase )
# Missing blocks
lowerCAmelCase_ : Optional[Any] = [i for i in blocks if i not in device_map_blocks]
lowerCAmelCase_ : Any = [i for i in device_map_blocks if i not in blocks]
if len(_UpperCAmelCase ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(_UpperCAmelCase ) )
if len(_UpperCAmelCase ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(_UpperCAmelCase ) )
if len(_UpperCAmelCase ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(_UpperCAmelCase ) )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = list(range(_UpperCAmelCase ) )
lowerCAmelCase_ : Tuple = int(ceil(n_layers / len(_UpperCAmelCase ) ) )
lowerCAmelCase_ : Tuple = [layers[i : i + n_blocks] for i in range(0 , _UpperCAmelCase , _UpperCAmelCase )]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
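
# Example (not part of the original file): distributing 12 layers over 4
# devices puts ceil(12 / 4) == 3 consecutive layers on each device.
assert get_device_map(12, [0, 1, 2, 3]) == {
    0: [0, 1, 2],
    1: [3, 4, 5],
    2: [6, 7, 8],
    3: [9, 10, 11],
}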
| 610 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class __lowercase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : Any):
SCREAMING_SNAKE_CASE_: Any = data
SCREAMING_SNAKE_CASE_: Node | None = None
class __lowercase :
"""simple docstring"""
def __init__( self : int):
SCREAMING_SNAKE_CASE_: Dict = None
SCREAMING_SNAKE_CASE_: str = None
def __iter__( self : List[str]):
SCREAMING_SNAKE_CASE_: Tuple = self.head
while self.head:
yield node.data
SCREAMING_SNAKE_CASE_: List[str] = node.next
if node == self.head:
break
def __len__( self : Dict):
return sum(1 for _ in self)
def __repr__( self : Dict):
return "->".join(str(lowerCAmelCase__) for item in iter(self))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any):
self.insert_nth(len(self) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any):
self.insert_nth(0 , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any):
if index < 0 or index > len(self):
raise IndexError("list index out of range.")
SCREAMING_SNAKE_CASE_: Any = Node(lowerCAmelCase__)
if self.head is None:
SCREAMING_SNAKE_CASE_: str = new_node # first node points itself
SCREAMING_SNAKE_CASE_: Optional[Any] = new_node
elif index == 0: # insert at head
SCREAMING_SNAKE_CASE_: Optional[Any] = self.head
SCREAMING_SNAKE_CASE_: str = new_node
else:
SCREAMING_SNAKE_CASE_: int = self.head
for _ in range(index - 1):
SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next
SCREAMING_SNAKE_CASE_: List[str] = temp.next
SCREAMING_SNAKE_CASE_: int = new_node
if index == len(self) - 1: # insert at tail
SCREAMING_SNAKE_CASE_: Any = new_node
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return self.delete_nth(0)
def _SCREAMING_SNAKE_CASE ( self : Any):
return self.delete_nth(len(self) - 1)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : int = 0):
if not 0 <= index < len(self):
raise IndexError("list index out of range.")
SCREAMING_SNAKE_CASE_: Optional[Any] = self.head
if self.head == self.tail: # just one node
SCREAMING_SNAKE_CASE_: List[str] = None
elif index == 0: # delete head node
SCREAMING_SNAKE_CASE_: int = self.tail.next.next
SCREAMING_SNAKE_CASE_: Tuple = self.head.next
else:
SCREAMING_SNAKE_CASE_: Optional[int] = self.head
for _ in range(index - 1):
SCREAMING_SNAKE_CASE_: Any = temp.next
SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next
SCREAMING_SNAKE_CASE_: int = temp.next.next
if index == len(self) - 1: # delete at tail
SCREAMING_SNAKE_CASE_: int = temp
return delete_node.data
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return len(self) == 0
def A_ ( ):
SCREAMING_SNAKE_CASE_: Dict = CircularLinkedList()
assert len(_UpperCAmelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(_UpperCAmelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(_UpperCAmelCase ) == i
circular_linked_list.insert_nth(_UpperCAmelCase , i + 1 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
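
# Illustrative usage sketch, in the spirit of the test above; it assumes the
# scrambled placeholder assignments in the class resolve to working head/tail
# updates:
cll = CircularLinkedList()
for value in (1, 2, 3):
    cll.insert_tail(value)
print(cll)                 # 1->2->3
print(len(cll))            # 3
print(cll.delete_front())  # 1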
| 671 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
a : Dict = logging.getLogger()
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = {}
UpperCAmelCase : Optional[Any] = os.path.join(__magic_name__ , "all_results.json" )
if os.path.exists(__magic_name__ ):
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : List[Any] = json.load(__magic_name__ )
else:
raise ValueError(F"can't find {path}" )
return results
a : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
import xla_spawn
UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
UpperCAmelCase : Tuple = f"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(snake_case , "argv" , snake_case ):
UpperCAmelCase : List[str] = time()
xla_spawn.main()
UpperCAmelCase : Dict = time()
UpperCAmelCase : Any = get_results(snake_case )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0 )
def A_ ( self ):
'''simple docstring'''
import xla_spawn
UpperCAmelCase : Optional[Any] = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(snake_case , "argv" , snake_case ):
xla_spawn.main()
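
# Note: patching argv and calling xla_spawn.main() above is the in-process
# equivalent of a shell invocation; the paths and flags below are the ones
# baked into the argv string built in the first test:
#
#   python xla_spawn.py --num_cores 8 \
#       examples/pytorch/text-classification/run_glue.py \
#       --model_name_or_path distilbert-base-uncased --do_train --do_eval ...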
| 609 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=2 , snake_case=9_9 , snake_case=0 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=2 , snake_case=0.02 , snake_case=2 , snake_case=4 , snake_case="last" , snake_case=True , snake_case=None , snake_case=0 , ):
'''simple docstring'''
UpperCAmelCase : str = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : int = is_training
UpperCAmelCase : Any = use_input_lengths
UpperCAmelCase : str = use_token_type_ids
UpperCAmelCase : List[str] = use_labels
UpperCAmelCase : Any = gelu_activation
UpperCAmelCase : str = sinusoidal_embeddings
UpperCAmelCase : List[Any] = causal
UpperCAmelCase : Union[str, Any] = asm
UpperCAmelCase : List[str] = n_langs
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : str = n_special
UpperCAmelCase : str = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : List[Any] = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Optional[int] = type_sequence_label_size
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Union[str, Any] = num_labels
UpperCAmelCase : Union[str, Any] = num_choices
UpperCAmelCase : Dict = summary_type
UpperCAmelCase : Dict = use_proj
UpperCAmelCase : List[Any] = scope
UpperCAmelCase : Optional[int] = bos_token_id
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_input_lengths:
UpperCAmelCase : Dict = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Tuple = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : List[str] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def A_ ( self ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Any = XLMModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Any = model(snake_case , lengths=snake_case , langs=snake_case )
UpperCAmelCase : Any = model(snake_case , langs=snake_case )
UpperCAmelCase : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : int = XLMWithLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Tuple = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = XLMForQuestionAnsweringSimple(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[str] = model(snake_case )
UpperCAmelCase : List[str] = model(snake_case , start_positions=snake_case , end_positions=snake_case )
UpperCAmelCase : List[str] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = XLMForQuestionAnswering(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Union[str, Any] = model(snake_case )
UpperCAmelCase : List[str] = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , p_mask=snake_case , )
UpperCAmelCase : Optional[Any] = model(
snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , )
        (UpperCAmelCase ,) = result_with_labels.to_tuple()
UpperCAmelCase : List[str] = model(snake_case , start_positions=snake_case , end_positions=snake_case )
        (UpperCAmelCase ,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Any = XLMForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(snake_case )
UpperCAmelCase : int = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.num_labels
UpperCAmelCase : Optional[int] = XLMForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.num_choices
UpperCAmelCase : Tuple = XLMForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Tuple = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = self.prepare_config_and_inputs()
        (
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
        ) = config_and_inputs
UpperCAmelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE__ : Tuple = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def A_ ( self , snake_case , snake_case , snake_case=False ):
'''simple docstring'''
UpperCAmelCase : Any = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
UpperCAmelCase : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = XLMModelTester(self )
UpperCAmelCase : str = ConfigTester(self , config_class=snake_case , emb_dim=3_7 )
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
'''simple docstring'''
self.assertIsInstance(snake_case , snake_case )
self.assertListEqual(
[isinstance(snake_case , snake_case ) for iter_attentions in attentions] , [True] * len(snake_case ) )
self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(snake_case ):
# adds PAD dummy token
UpperCAmelCase : str = min_length + idx + 1
UpperCAmelCase : List[Any] = min_length + idx + 1
UpperCAmelCase : List[Any] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case ) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ):
'''simple docstring'''
self.assertIsInstance(snake_case , snake_case )
self.assertListEqual(
[isinstance(snake_case , snake_case ) for iter_hidden_states in hidden_states] , [True] * len(snake_case ) , )
self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(snake_case ):
# adds PAD dummy token
UpperCAmelCase : List[Any] = min_length + idx + 1
UpperCAmelCase : Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case ) , )
pass
@slow
def A_ ( self ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = XLMModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(snake_case )
UpperCAmelCase : Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=snake_case ) # the president
UpperCAmelCase : Tuple = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase : Dict = model.generate(snake_case , do_sample=snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case )
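
# Illustrative standalone sketch of the slow integration test above; the
# checkpoint name and the token ids for "the president" are taken directly
# from the test body:
import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]])  # "the president"
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].tolist())  # greedy decoding repeats "the president", as asserted above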
| 609 | 1 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __SCREAMING_SNAKE_CASE ( *lowerCamelCase_: Any ):
"""simple docstring"""
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
snake_case : List[Any] = list(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
snake_case : List[Any] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: int ):
"""simple docstring"""
snake_case : str = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Optional[Any] = None , lowerCamelCase_: Dict = 1_2_8 ):
"""simple docstring"""
if function is None:
return functools.partial(__UpperCAmelCase , starting_batch_size=__UpperCAmelCase )
snake_case : Optional[Any] = starting_batch_size
def decorator(*lowerCamelCase_: List[Any] , **lowerCamelCase_: Optional[int] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
snake_case : Optional[int] = list(inspect.signature(__UpperCAmelCase ).parameters.keys() )
# Guard against user error
if len(__UpperCAmelCase ) < (len(__UpperCAmelCase ) + 1):
snake_case : List[str] = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
                f'''Batch size was passed into `{function.__name__}` as the first argument when called. '''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
if should_reduce_batch_size(__UpperCAmelCase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
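
# Illustrative usage sketch, assuming the decorator above is a scrambled copy
# of accelerate's `find_executable_batch_size`; the training body here is
# hypothetical:
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    # batch_size is injected as the first positional argument and halved on
    # every out-of-memory error, until the call succeeds or reaches zero.
    print(f"trying batch size {batch_size}")

training_loop()  # note: called without passing batch_size yourself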
| 449 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_A = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_A = get_tests_dir("""fixtures/vocab.json""")
_A = get_tests_dir("""fixtures""")
class _lowerCamelCase ( unittest.TestCase ):
_lowerCamelCase :Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = 0
def _lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Any = WavaVecaConfig()
lowerCAmelCase__ : str = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
lowerCAmelCase__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase , os.path.join(UpperCamelCase , UpperCamelCase ) )
copyfile(UpperCamelCase , os.path.join(UpperCamelCase , """vocab.json""" ) )
lowerCAmelCase__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Optional[int] = WavaVecaFeatureExtractor()
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCAmelCase__ : List[str] = WavaVecaProcessor(UpperCamelCase , UpperCamelCase )
# save in new folder
processor.save_pretrained(UpperCamelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """r""" ) as f:
lowerCAmelCase__ : Optional[int] = json.load(UpperCamelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """w""" ) as f:
f.write(json.dumps(UpperCamelCase ) )
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : int = WavaVecaFeatureExtractor()
lowerCAmelCase__ : int = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCAmelCase__ : List[str] = WavaVecaProcessor(UpperCamelCase , UpperCamelCase )
# save in new folder
processor.save_pretrained(UpperCamelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """r""" ) as f:
lowerCAmelCase__ : Any = json.load(UpperCamelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """w""" ) as f:
f.write(json.dumps(UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : List[str] = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(UpperCamelCase )
# copy relevant files
copyfile(UpperCamelCase , os.path.join(UpperCamelCase , """vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """w""" ) as f:
f.write("""{}""" )
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase ):
lowerCAmelCase__ : int = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase ):
lowerCAmelCase__ : Any = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase )
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowerCAmelCase__ : str = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowerCAmelCase__ : Dict = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase , use_fast=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , UpperCamelCase )
AutoFeatureExtractor.register(UpperCamelCase , UpperCamelCase )
AutoTokenizer.register(UpperCamelCase , slow_tokenizer_class=UpperCamelCase )
AutoProcessor.register(UpperCamelCase , UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase ):
AutoProcessor.register(UpperCamelCase , UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase__ : str = CustomFeatureExtractor.from_pretrained(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ : List[Any] = os.path.join(UpperCamelCase , """vocab.txt""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase__ : Union[str, Any] = CustomTokenizer(UpperCamelCase )
lowerCAmelCase__ : List[str] = CustomProcessor(UpperCamelCase , UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase )
lowerCAmelCase__ : str = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Union[str, Any] = False
class _lowerCamelCase ( a_ ):
_lowerCamelCase :List[str] = False
class _lowerCamelCase ( a_ ):
_lowerCamelCase :List[str] = "AutoFeatureExtractor"
_lowerCamelCase :Tuple = "AutoTokenizer"
_lowerCamelCase :Union[str, Any] = False
try:
AutoConfig.register("""custom""" , UpperCamelCase )
AutoFeatureExtractor.register(UpperCamelCase , UpperCamelCase )
AutoTokenizer.register(UpperCamelCase , slow_tokenizer_class=UpperCamelCase )
AutoProcessor.register(UpperCamelCase , UpperCamelCase )
# If remote code is not set, the default is to use local classes.
lowerCAmelCase__ : str = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCamelCase ( unittest.TestCase ):
_lowerCamelCase :Tuple = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _lowerCAmelCase ( cls : Any ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def _lowerCAmelCase ( cls : Dict ) -> Tuple:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Dict = WavaVecaProcessor.from_pretrained(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase , """test-processor""" ) , push_to_hub=UpperCamelCase , use_auth_token=self._token )
lowerCAmelCase__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(new_processor.feature_extractor , UpperCamelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase , """test-processor-org""" ) , push_to_hub=UpperCamelCase , use_auth_token=self._token , organization="""valid_org""" , )
lowerCAmelCase__ : List[Any] = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(new_processor.feature_extractor , UpperCamelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowerCAmelCase__ : int = CustomFeatureExtractor.from_pretrained(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ : List[str] = os.path.join(UpperCamelCase , """vocab.txt""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase__ : int = CustomTokenizer(UpperCamelCase )
lowerCAmelCase__ : List[Any] = CustomProcessor(UpperCamelCase , UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
lowerCAmelCase__ : List[str] = Repository(UpperCamelCase , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(UpperCamelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase , """tokenizer_config.json""" ) ) as f:
lowerCAmelCase__ : List[Any] = json.load(UpperCamelCase )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase , """custom_processing.py""" ) ) )
repo.push_to_hub()
lowerCAmelCase__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
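
# Illustrative usage sketch: the core pattern every test above exercises,
# reduced to two lines; the checkpoint is the one the tests themselves load:
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(processor).__name__)  # Wav2Vec2Processor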
| 299 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCAmelCase_ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( _snake_case ):
'''simple docstring'''
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any]):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_)
@torch.no_grad()
def __call__( self : List[Any] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.unet.config.sample_size / self.unet.config.sample_rate
SCREAMING_SNAKE_CASE_ : int = audio_length_in_s * self.unet.config.sample_rate
SCREAMING_SNAKE_CASE_ : Optional[int] = 2 ** len(self.unet.up_blocks)
if sample_size < 3 * down_scale_factor:
raise ValueError(
                F'{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'
F' {3 * down_scale_factor / self.unet.config.sample_rate}.')
SCREAMING_SNAKE_CASE_ : List[str] = int(lowercase_)
if sample_size % down_scale_factor != 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
F' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
''' process.''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = next(iter(self.unet.parameters())).dtype
SCREAMING_SNAKE_CASE_ : Optional[int] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowercase_ , lowercase_) and len(lowercase_) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(lowercase_)}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.')
SCREAMING_SNAKE_CASE_ : List[str] = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
# set step values
self.scheduler.set_timesteps(lowercase_ , device=audio.device)
SCREAMING_SNAKE_CASE_ : List[str] = self.scheduler.timesteps.to(lowercase_)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE_ : Tuple = self.unet(lowercase_ , lowercase_).sample
            # 2. compute previous audio sample: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : List[str] = audio.clamp(-1 , 1).float().cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowercase_)
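
# Illustrative usage sketch: the class above matches diffusers' unconditional
# audio-generation pipeline (DanceDiffusion-style). The checkpoint name below
# is an assumption made for illustration; this row does not name one:
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
audios = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios  # numpy array (batch, channels, samples)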
| 713 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : List[Any]=0.2 , lowercase_ : Dict=0.2):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE_ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE_ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE_ : List[Any] = conva_get[:2]
SCREAMING_SNAKE_CASE_ : Optional[int] = conva_get[2]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size_pa
SCREAMING_SNAKE_CASE_ : Tuple = rate_w
SCREAMING_SNAKE_CASE_ : List[Any] = rate_t
SCREAMING_SNAKE_CASE_ : Tuple = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
SCREAMING_SNAKE_CASE_ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
SCREAMING_SNAKE_CASE_ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
SCREAMING_SNAKE_CASE_ : Optional[int] = -2 * np.random.rand(self.conva[1]) + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
SCREAMING_SNAKE_CASE_ : Any = -2 * np.random.rand(self.num_bpa) + 1
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(lowercase_ , '''wb''') as f:
pickle.dump(lowercase_ , lowercase_)
print(F'Model saved: {save_path}')
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] , lowercase_ : Optional[int]):
'''simple docstring'''
with open(lowercase_ , '''rb''') as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = pickle.load(lowercase_) # noqa: S301
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_dic.get('''conv1''')
conv_get.append(model_dic.get('''step_conv1'''))
SCREAMING_SNAKE_CASE_ : Optional[int] = model_dic.get('''size_pooling1''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_dic.get('''num_bp1''')
SCREAMING_SNAKE_CASE_ : List[str] = model_dic.get('''num_bp2''')
SCREAMING_SNAKE_CASE_ : List[Any] = model_dic.get('''num_bp3''')
SCREAMING_SNAKE_CASE_ : Any = model_dic.get('''rate_weight''')
SCREAMING_SNAKE_CASE_ : str = model_dic.get('''rate_thre''')
# create model instance
SCREAMING_SNAKE_CASE_ : str = CNN(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
        # modify model parameters
SCREAMING_SNAKE_CASE_ : List[Any] = model_dic.get('''w_conv1''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_dic.get('''wkj''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_dic.get('''vji''')
SCREAMING_SNAKE_CASE_ : Tuple = model_dic.get('''thre_conv1''')
SCREAMING_SNAKE_CASE_ : Dict = model_dic.get('''thre_bp2''')
SCREAMING_SNAKE_CASE_ : Any = model_dic.get('''thre_bp3''')
return conv_ins
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[Any]):
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x))
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str]):
'''simple docstring'''
return round(lowercase_ , 3)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = convs[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = convs[1]
SCREAMING_SNAKE_CASE_ : List[str] = np.shape(lowercase_)[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE_ : Dict = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase_):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase_):
SCREAMING_SNAKE_CASE_ : int = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowercase_)
        # calculate the feature map of every single kernel and save it as a list of matrices
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(lowercase_):
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for i_focus in range(len(lowercase_)):
SCREAMING_SNAKE_CASE_ : Dict = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase_))
SCREAMING_SNAKE_CASE_ : List[Any] = np.asmatrix(lowercase_).reshape(
lowercase_ , lowercase_)
data_featuremap.append(lowercase_)
        # expand the data slices to one dimension
SCREAMING_SNAKE_CASE_ : Dict = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.asarray(lowercase_)
return focus_list, data_featuremap
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : str , lowercase_ : Any="average_pool"):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(featuremaps[0])
SCREAMING_SNAKE_CASE_ : Any = int(size_map / size_pooling)
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for i_map in range(len(lowercase_)):
SCREAMING_SNAKE_CASE_ : int = featuremaps[i_map]
SCREAMING_SNAKE_CASE_ : List[str] = []
for i_focus in range(0 , lowercase_ , lowercase_):
for j_focus in range(0 , lowercase_ , lowercase_):
SCREAMING_SNAKE_CASE_ : Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase_))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase_))
SCREAMING_SNAKE_CASE_ : str = np.asmatrix(lowercase_).reshape(lowercase_ , lowercase_)
featuremap_pooled.append(lowercase_)
return featuremap_pooled
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for i in range(len(lowercase_)):
SCREAMING_SNAKE_CASE_ : List[str] = np.shape(data[i])
SCREAMING_SNAKE_CASE_ : Dict = data[i].reshape(1 , shapes[0] * shapes[1])
SCREAMING_SNAKE_CASE_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = np.asarray(lowercase_)
return data_expanded
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = np.asarray(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.shape(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : List[Any] = 0
for i_map in range(lowercase_):
SCREAMING_SNAKE_CASE_ : int = np.ones((size_map, size_map))
for i in range(0 , lowercase_ , lowercase_):
for j in range(0 , lowercase_ , lowercase_):
SCREAMING_SNAKE_CASE_ : Any = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE_ : List[str] = i_pool + 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.multiply(
lowercase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(lowercase_)
return pd_all
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Dict=bool):
'''simple docstring'''
print('''----------------------Start Training-------------------------''')
print((''' - - Shape: Train_Data ''', np.shape(lowercase_)))
print((''' - - Shape: Teach_Data ''', np.shape(lowercase_)))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = []
SCREAMING_SNAKE_CASE_ : Optional[int] = 10000
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
print(F'-------------Learning Time {rp}--------------')
for p in range(len(lowercase_)):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE_ : Tuple = np.asmatrix(datas_train[p])
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.asarray(datas_teach[p])
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
lowercase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE_ : int = self.pooling(lowercase_ , self.size_poolinga)
SCREAMING_SNAKE_CASE_ : Optional[int] = np.shape(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._expand(lowercase_)
SCREAMING_SNAKE_CASE_ : int = data_bp_input
SCREAMING_SNAKE_CASE_ : int = np.dot(lowercase_ , self.vji.T) - self.thre_bpa
SCREAMING_SNAKE_CASE_ : Dict = self.sig(lowercase_)
SCREAMING_SNAKE_CASE_ : str = np.dot(lowercase_ , self.wkj.T) - self.thre_bpa
SCREAMING_SNAKE_CASE_ : List[str] = self.sig(lowercase_)
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE_ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase_ , (1 - bp_outa)))
SCREAMING_SNAKE_CASE_ : Tuple = np.multiply(
np.dot(lowercase_ , self.wkj) , np.multiply(lowercase_ , (1 - bp_outa)))
SCREAMING_SNAKE_CASE_ : Optional[int] = np.dot(lowercase_ , self.vji)
SCREAMING_SNAKE_CASE_ : Optional[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE_ : Tuple = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE_ : Optional[int] = self._calculate_gradient_from_pool(
lowercase_ , lowercase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
SCREAMING_SNAKE_CASE_ : List[Any] = self._expand_mat(pd_conva_all[k_conv])
SCREAMING_SNAKE_CASE_ : int = self.rate_weight * np.dot(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
SCREAMING_SNAKE_CASE_ : Optional[int] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
                # fully connected layer
SCREAMING_SNAKE_CASE_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE_ : Optional[int] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error for this single image and accumulate it
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = rp + 1
SCREAMING_SNAKE_CASE_ : Tuple = error_count / patterns
all_mse.append(lowercase_)
def draw_error():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(lowercase_ , '''+-''')
plt.plot(lowercase_ , '''r--''')
plt.xlabel('''Learning Times''')
plt.ylabel('''All_mse''')
plt.grid(lowercase_ , alpha=0.5)
plt.show()
        print('''------------------Training Completed---------------------''')
print((''' - - Training epoch: ''', rp, F' - - Mse: {mse:.6f}'))
if draw_e:
draw_error()
return mse
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
print('''-------------------Start Testing-------------------------''')
print((''' - - Shape: Test_Data ''', np.shape(lowercase_)))
for p in range(len(lowercase_)):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.asmatrix(datas_test[p])
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
lowercase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE_ : Dict = self.pooling(lowercase_ , self.size_poolinga)
SCREAMING_SNAKE_CASE_ : List[Any] = self._expand(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = data_bp_input
SCREAMING_SNAKE_CASE_ : Dict = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE_ : str = self.sig(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE_ : Dict = self.sig(lowercase_)
produce_out.extend(bp_outa.getA().tolist())
SCREAMING_SNAKE_CASE_ : Optional[Any] = [list(map(self.do_round , lowercase_)) for each in produce_out]
return np.asarray(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = np.asmatrix(lowercase_)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
lowercase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE_ : int = self.pooling(lowercase_ , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
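
# Illustrative worked sketch: a numeric example of the average-pooling step
# the `pooling` method implements (size_pooling=2, pooling_type="average_pool"),
# written independently of the scrambled class internals:
import numpy as np

feature_map = np.arange(16).reshape(4, 4)
pooled = feature_map.reshape(2, 2, 2, 2).mean(axis=(1, 3))
print(pooled)  # [[ 2.5  4.5]
               #  [10.5 12.5]]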
| 176 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def UpperCamelCase_( __magic_name__ : Iterable[str] , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = iter(__magic_name__ )
while True:
_lowerCAmelCase :List[Any] = tuple(itertools.islice(__magic_name__ , __magic_name__ ) )
if not chunk:
return
yield chunk
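# illustrative behaviour of the chunker above: chunking "ABCDE" into
# size-2 pieces yields ("A", "B"), ("C", "D"), ("E",)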
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
_lowerCAmelCase :Union[str, Any] = ''
if len(__magic_name__ ) < 2:
return dirty
for i in range(len(__magic_name__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(__magic_name__ ) & 1:
clean += "X"
return clean
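# e.g. "Hello World" is prepared as "HELXLOWORLDX": non-letters are dropped,
# doubled letters are split with an "X", and an "X" is appended to make the length even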
def UpperCamelCase_( __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
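    # the 5x5 Playfair table holds only 25 letters, so J is omitted (conventionally merged with I)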
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_lowerCAmelCase :List[str] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(__magic_name__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(__magic_name__ )
return table
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Tuple = generate_table(__magic_name__ )
_lowerCAmelCase :List[str] = prepare_input(__magic_name__ )
_lowerCAmelCase :List[Any] = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(__magic_name__ , 2 ):
        rowa , cola = divmod(table.index(chara ) , 5 )
        rowb , colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle: each letter is replaced by the one in its own row, other letter's column
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
return ciphertext
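# a minimal round-trip sketch (using the upstream names encode/decode for clarity,
# since every function in this file shares the same obfuscated name):
#   ciphertext = encode("playfair example", "hide the gold")
#   decode("playfair example", ciphertext)  # recovers the *prepared* plaintext,
#   i.e. the uppercased, X-padded form, not the original message verbatim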
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = generate_table(__magic_name__ )
_lowerCAmelCase :str = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(__magic_name__ , 2 ):
        rowa , cola = divmod(table.index(chara ) , 5 )
        rowb , colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle: invert the encoding swap of columns
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
return plaintext | 687 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
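        # the backbone class (last entry in all_model_classes) is skipped: backbones expose no output embeddings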
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self ) | 687 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class lowercase :
def __init__( self , lowercase ) -> List[Any]:
lowerCAmelCase = data
lowerCAmelCase = None
lowerCAmelCase = None
def UpperCAmelCase__ ( ):
'''simple docstring'''
print("""\n********Press N to stop entering at any point of time********\n""" )
lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower()
lowerCAmelCase = queue.Queue()
lowerCAmelCase = TreeNode(int(snake_case__ ) )
q.put(snake_case__ )
while not q.empty():
lowerCAmelCase = q.get()
lowerCAmelCase = F'Enter the left node of {node_found.data}: '
lowerCAmelCase = input(snake_case__ ).strip().lower() or """n"""
if check == "n":
return tree_node
lowerCAmelCase = TreeNode(int(snake_case__ ) )
lowerCAmelCase = left_node
q.put(snake_case__ )
lowerCAmelCase = F'Enter the right node of {node_found.data}: '
lowerCAmelCase = input(snake_case__ ).strip().lower() or """n"""
if check == "n":
return tree_node
lowerCAmelCase = TreeNode(int(snake_case__ ) )
lowerCAmelCase = right_node
q.put(snake_case__ )
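    # unreachable in practice: the loop above returns as soon as the user stops entering nodes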
raise
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
lowerCAmelCase = queue.Queue()
q.put(snake_case__ )
while not q.empty():
lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
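# unlike level_order above, the variant below prints one line per tree level by draining the queue level by level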
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
lowerCAmelCase = queue.Queue()
q.put(snake_case__ )
while not q.empty():
lowerCAmelCase = []
while not q.empty():
lowerCAmelCase = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(snake_case__ )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
lowerCAmelCase = []
lowerCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
            stack.append(n )
lowerCAmelCase = n.left
# end of while means current node doesn't have left child
lowerCAmelCase = stack.pop()
# start to traverse its right child
lowerCAmelCase = n.right
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
lowerCAmelCase = []
lowerCAmelCase = node
while n or stack:
while n:
            stack.append(n )
lowerCAmelCase = n.left
lowerCAmelCase = stack.pop()
print(n.data , end=""",""" )
lowerCAmelCase = n.right
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or not node:
return
    stacka , stackb = [], []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data , end=""",""" )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str = "" , SCREAMING_SNAKE_CASE : List[str]=50 , SCREAMING_SNAKE_CASE : str="*" ):
'''simple docstring'''
if not s:
return "\n" + width * char
    left , extra = divmod(width - len(snake_case__ ) - 2 , 2 )
return F'{left * char} {s} {(left + extra) * char}'
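# e.g. prompt("Binary Tree Traversals") centers the title in a 50-character band: 13 '*', a space, the title, a space, 13 '*'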
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
SCREAMING_SNAKE_CASE__ = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 706 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'ibert'
def __init__( self , lowercase=30_522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3_072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=False , lowercase="none" , **lowercase , ) -> str:
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = quant_mode
lowerCAmelCase = force_dequant
class lowercase ( _UpperCAmelCase ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 393 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _lowerCAmelCase ( lowercase : int , lowercase : Optional[int] , lowercase : Any=None , lowercase : str=None ) ->List[Any]:
"""simple docstring"""
if attention_mask is None:
lowercase__ = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
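        # attend everywhere except padding: the mask is 0 at pad_token_id positions and 1 elsewhere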
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __A :
"""simple docstring"""
A_ = OPTConfig
A_ = {}
A_ = 'gelu'
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=9_9 , _lowerCamelCase=1_6 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=2_0 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=1_6 , _lowerCamelCase=1_6 , )-> Optional[Any]:
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = eos_token_id
lowercase__ = pad_token_id
lowercase__ = bos_token_id
lowercase__ = embed_dim
lowercase__ = word_embed_proj_dim
lowercase__ = False
def snake_case_( self )-> str:
lowercase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase__ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase__ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_lowerCamelCase , **self.config_updates , )
lowercase__ = prepare_opt_inputs_dict(_lowerCamelCase , _lowerCamelCase )
return config, inputs_dict
def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> Union[str, Any]:
lowercase__ = TFOPTModel(config=_lowerCamelCase )
lowercase__ = inputs_dict['''input_ids''']
lowercase__ = input_ids[:1, :]
lowercase__ = inputs_dict['''attention_mask'''][:1, :]
lowercase__ = 1
# first forward pass
lowercase__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
lowercase__ , lowercase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
lowercase__ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
lowercase__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase__ = output_from_no_past[:, -3:, random_slice_idx]
lowercase__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1e-3 )
@require_tf
class __A ( a , a , unittest.TestCase ):
"""simple docstring"""
A_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
A_ = (TFOPTForCausalLM,) if is_tf_available() else ()
A_ = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
A_ = False
A_ = False
A_ = False
A_ = 1_0
def snake_case_( self )-> Dict:
lowercase__ = TFOPTModelTester(self )
lowercase__ = ConfigTester(self , config_class=_lowerCamelCase )
def snake_case_( self )-> List[str]:
self.config_tester.run_common_tests()
def snake_case_( self )-> List[str]:
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase )
def snake_case_( self )-> Tuple:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_lowerCamelCase , _lowerCamelCase ):
if hasattr(_lowerCamelCase , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_lowerCamelCase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowercase__ = model_class(config=_lowerCamelCase )
lowercase__ = _get_word_embedding_weight(_lowerCamelCase , model.get_input_embeddings() )
lowercase__ = _get_word_embedding_weight(_lowerCamelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_lowerCamelCase )
lowercase__ = _get_word_embedding_weight(_lowerCamelCase , model.get_input_embeddings() )
lowercase__ = _get_word_embedding_weight(_lowerCamelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowercase__ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _lowerCamelCase )
# check that weights remain the same after resizing
lowercase__ = True
                for pa, pb in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
lowercase__ = False
self.assertTrue(_lowerCamelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _lowerCamelCase )
lowercase__ = True
                    for pa, pb in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
lowercase__ = False
self.assertTrue(_lowerCamelCase )
def _lowerCAmelCase ( lowercase : Any ) ->Optional[Any]:
"""simple docstring"""
return tf.constant(lowercase , dtype=tf.intaa )
@require_tf
class __A ( unittest.TestCase ):
"""simple docstring"""
A_ = 9_9
def snake_case_( self )-> Any:
lowercase__ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowercase__ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowercase__ = input_ids.shape[0]
lowercase__ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case_( self )-> Optional[int]:
lowercase__ = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
lowercase__ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowercase__ = tf.not_equal(_lowerCamelCase , model.config.pad_token_id )
with tf.GradientTape():
lowercase__ = model(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase ).last_hidden_state
lowercase__ = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , _lowerCamelCase )
lowercase__ = tf.constant(
[[-0.2_8_7_3, -1.9_2_1_8, -0.3_0_3_3], [-1.2_7_1_0, -0.1_3_3_8, -0.1_9_0_2], [0.4_0_9_5, 0.1_2_1_4, -1.3_1_2_1]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCamelCase , atol=4e-3 ) )
lowercase__ = tf.function(_lowerCamelCase , jit_compile=_lowerCamelCase )
lowercase__ = xla_generate(_lowerCamelCase , _lowerCamelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCamelCase , atol=4e-2 ) )
@require_tf
@slow
class __A ( unittest.TestCase ):
"""simple docstring"""
def snake_case_( self )-> Optional[int]:
super().setUp()
lowercase__ = '''facebook/opt-350m'''
def snake_case_( self )-> str:
lowercase__ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowercase__ = GPTaTokenizer.from_pretrained(self.path_model )
lowercase__ = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowercase__ = tokenizer(_lowerCamelCase , return_tensors='''tf''' , padding=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
lowercase__ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowercase__ = tf.constant(
[
[1.3_8_5_1, -1_3.8_9_2_3, -1_0.5_2_2_9, -1_0.7_5_3_3, -0.2_3_0_9, -1_0.2_3_8_4, -0.5_3_6_5, -9.0_9_4_7, -5.1_6_7_0],
[-4.7_0_7_3, -1_0.6_2_7_6, -3.9_4_1_5, -2_1.5_2_4_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2],
[0.6_2_4_7, -3.4_2_2_9, -8.9_1_7_9, -1.4_2_9_7, -1_4.1_6_5_0, 1.4_1_4_6, -9.0_2_1_8, -0.2_7_0_3, -0.2_7_0_3],
[6.4_7_8_3, -1.9_9_1_3, -1_0.7_9_2_6, -2.3_3_3_6, 1.5_0_9_2, -0.9_9_7_4, -6.8_2_1_3, 1.3_4_7_7, 1.3_4_7_7],
] )
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-4 ) )
lowercase__ = tf.function(_lowerCamelCase , jit_compile=_lowerCamelCase )
lowercase__ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-4 ) )
@require_tf
@slow
class __A ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case_( self )-> Optional[int]:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def snake_case_( self )-> List[Any]:
lowercase__ = '''facebook/opt-125m'''
lowercase__ = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowercase__ = []
lowercase__ = GPTaTokenizer.from_pretrained(_lowerCamelCase )
lowercase__ = TFOPTForCausalLM.from_pretrained(_lowerCamelCase )
for prompt in self.prompts:
lowercase__ = tokenizer(_lowerCamelCase , return_tensors='''tf''' ).input_ids
lowercase__ = model.generate(_lowerCamelCase , max_length=1_0 )
lowercase__ = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def snake_case_( self )-> Dict:
lowercase__ = '''facebook/opt-350m'''
lowercase__ = GPTaTokenizer.from_pretrained(_lowerCamelCase )
lowercase__ = TFOPTForCausalLM.from_pretrained(_lowerCamelCase )
lowercase__ = '''left'''
# use different length sentences to test batching
lowercase__ = [
'''Hello, my dog is a little''',
'''Today, I''',
]
lowercase__ = tokenizer(_lowerCamelCase , return_tensors='''tf''' , padding=_lowerCamelCase )
lowercase__ = inputs['''input_ids''']
lowercase__ = model.generate(input_ids=_lowerCamelCase , attention_mask=inputs['''attention_mask'''] )
lowercase__ = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
lowercase__ = model.generate(input_ids=_lowerCamelCase )
lowercase__ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
lowercase__ = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
lowercase__ = model.generate(input_ids=_lowerCamelCase , max_length=model.config.max_length - num_paddings )
lowercase__ = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
lowercase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCamelCase )
lowercase__ = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCamelCase )
lowercase__ = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , [non_padded_sentence, padded_sentence] )
def snake_case_( self )-> int:
lowercase__ = '''facebook/opt-350m'''
lowercase__ = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowercase__ = []
lowercase__ = GPTaTokenizer.from_pretrained(_lowerCamelCase )
lowercase__ = TFOPTForCausalLM.from_pretrained(_lowerCamelCase )
for prompt in self.prompts:
lowercase__ = tokenizer(_lowerCamelCase , return_tensors='''tf''' ).input_ids
lowercase__ = model.generate(_lowerCamelCase , max_length=1_0 )
lowercase__ = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
| 161 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowerCAmelCase = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowerCAmelCase = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_lowerCAmelCase = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
"""simple docstring"""
def snake_case_( self )-> Dict:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def snake_case_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , )-> List[Any]:
lowercase__ = len(references[0] )
if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
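        # transpose [prediction][ref_idx] -> [ref_idx][prediction]: sacrebleu expects one stream per reference set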
lowercase__ = [[refs[i] for refs in references] for i in range(_lowerCamelCase )]
lowercase__ = TER(
normalized=_lowerCamelCase , no_punct=_lowerCamelCase , asian_support=_lowerCamelCase , case_sensitive=_lowerCamelCase , )
lowercase__ = sb_ter.corpus_score(_lowerCamelCase , _lowerCamelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 161 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_snake_case = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_snake_case = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _A ( __magic_name__ ):
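    # denormalize: the model emits images in [-1, 1]; map them to [0, 1] before PIL conversion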
lowercase__ = (images / 2 + 0.5).clamp(0 , 1 )
lowercase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase__ = numpy_to_pil(__magic_name__ )
return images
def _A ( __magic_name__ ):
if images.ndim == 3:
lowercase__ = images[None, ...]
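    # scale [0, 1] floats to 8-bit pixel values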
lowercase__ = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowercase__ = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
lowercase__ = [Image.fromarray(__magic_name__ ) for image in images]
return pil_images
| 720 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
def __init__( self :Optional[int] , *_lowercase :Tuple , **_lowercase :Optional[Any] ):
'''simple docstring'''
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 611 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __lowercase (_SCREAMING_SNAKE_CASE :Dict ):
SCREAMING_SNAKE_CASE : str = 3_84
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : int = [3, 3, 9, 3]
SCREAMING_SNAKE_CASE : Any = [96, 1_92, 3_84, 7_68]
if "small" in model_name:
SCREAMING_SNAKE_CASE : Optional[Any] = [3, 3, 27, 3]
SCREAMING_SNAKE_CASE : int = [96, 1_92, 3_84, 7_68]
if "base" in model_name:
SCREAMING_SNAKE_CASE : Optional[Any] = [3, 3, 27, 3]
SCREAMING_SNAKE_CASE : Dict = [1_28, 2_56, 5_12, 10_24]
SCREAMING_SNAKE_CASE : Any = 5_12
if "large" in model_name:
SCREAMING_SNAKE_CASE : Union[str, Any] = [3, 3, 27, 3]
SCREAMING_SNAKE_CASE : Union[str, Any] = [1_92, 3_84, 7_68, 15_36]
SCREAMING_SNAKE_CASE : Tuple = 7_68
if "xlarge" in model_name:
SCREAMING_SNAKE_CASE : Dict = [3, 3, 27, 3]
SCREAMING_SNAKE_CASE : List[Any] = [2_56, 5_12, 10_24, 20_48]
SCREAMING_SNAKE_CASE : int = 10_24
# set label information
SCREAMING_SNAKE_CASE : List[Any] = 1_50
SCREAMING_SNAKE_CASE : str = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''ade20k-id2label.json'''
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
    SCREAMING_SNAKE_CASE : List[Any] = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = ConvNextConfig(
depths=_SCREAMING_SNAKE_CASE , hidden_sizes=_SCREAMING_SNAKE_CASE , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
SCREAMING_SNAKE_CASE : List[Any] = UperNetConfig(
backbone_config=_SCREAMING_SNAKE_CASE , auxiliary_in_channels=_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE , )
return config
def __lowercase (_SCREAMING_SNAKE_CASE :Any ):
SCREAMING_SNAKE_CASE : Tuple = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def __lowercase (_SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :Any ):
SCREAMING_SNAKE_CASE : Dict = dct.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Tuple = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
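# Example invocation of this conversion script (script filename and output path
# are illustrative, not confirmed by the source):
#   python convert_upernet_convnext_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub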
| 507 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 507 | 1 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
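# The `mst` implementation under test lives in the repo's graphs package; as a
# sanity reference only, a minimal heap-based Prim's over the same
# adjacency-list shape could look like this (function name is illustrative):
import heapq


def prim_mst_edges(adjacency, start=0):
    """Grow an MST from `start`, returning edges as (frm, to, cost) tuples."""
    visited = {start}
    heap = [(cost, start, nxt) for nxt, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap and len(visited) < len(adjacency):
        cost, frm, to = heapq.heappop(heap)
        if to in visited:  # lazy deletion: skip stale heap entries
            continue
        visited.add(to)
        mst_edges.append((frm, to, cost))
        for nxt, nxt_cost in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (nxt_cost, to, nxt))
    return mst_edges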
| 715 | def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
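    # Quick sanity check from the problem statement: 28 = 1 + 2 + ... + 7 is the
    # first triangular number with more than five divisors (1, 2, 4, 7, 14, 28).
    assert count_divisors(28) == 6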
| 390 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only)
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
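# Typical usage (checkpoint id is illustrative; any repo carrying a feature
# extractor config works):
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# Custom classes can be wired in with AutoFeatureExtractor.register(config_class, extractor_class).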
| 624 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
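    # The tests below all follow the same recipe: build an HfArgumentParser from a
    # dataclass, build the argparse.ArgumentParser it should be equivalent to, and
    # compare them action-by-action via argparsersEqual. For instance:
    #     parser = HfArgumentParser(BasicExample)
    #     (example,) = parser.parse_args_into_dataclasses(["--foo", "1", "--bar", "0.5", "--baz", "quux"])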
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]))
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]))
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]))

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True)
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True)
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]
        example = BasicExample(**parsed_args)
        self.assertEqual(example, BasicExample(**args_dict_for_json))
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        example = BasicExample(**parsed_args)
        self.assertEqual(example, BasicExample(**args_dict_for_yaml))

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser) | 39 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 709 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        })
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(self, args, tokenizer, limit_length=None, mode=Split.train, cache_dir=None):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}")
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode)
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
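# Usage sketch (data_dir, task and tokenizer are illustrative):
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)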
| 440 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_model(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
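# To run just this file in a transformers checkout (path is illustrative):
#   python -m pytest tests/models/albert/test_modeling_flax_albert.py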
| 159 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
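# Usage sketch:
#   config = DownloadConfig(force_download=True, max_retries=3)
#   fresh = config.copy()  # every field is deep-copied, so mutating `fresh` leaves `config` intact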
| 461 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
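# Note: _LazyModule defers the heavy torch/sentencepiece imports until an attribute
# (e.g. ReformerModel) is first accessed, keeping `import transformers` fast.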
| 704 | import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the _import_structure objects and the TYPE_CHECKING objects defined."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
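# For example, analyze_results({"none": ["A", "A"]}, {"none": ["A"]}) reports the
# duplicate "A" in _import_structure, while matching keys and object sets yield
# an empty error list — handy when debugging a failing init check.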
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 164 | 0 |
"""simple docstring"""
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 535 |
"""simple docstring"""
def kth_permutation(k: int, n: int) -> list[int]:
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
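    # Quick check beyond the doctests: the permutations of 0..2 in lexicographic
    # order end with [2, 1, 0], i.e. the last (k = 3! - 1 = 5) permutation.
    assert kth_permutation(5, 3) == [2, 1, 0]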
| 535 | 1 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}")

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
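
# A minimal standalone sketch (not from this script) of what TopKBinarizer-style
# masking computes: keep the `threshold` fraction of entries with the largest
# importance scores and zero out the rest. `topk_mask` and `toy_scores` are
# illustrative names, not the library API.
#
#     import torch
#
#     def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
#         k = max(1, int(threshold * scores.numel()))  # entries to keep
#         mask = torch.zeros_like(scores)
#         _, idx = scores.flatten().topk(k)
#         mask.view(-1)[idx] = 1.0
#         return mask
#
#     toy_scores = torch.randn(4, 4)
#     print(topk_mask(toy_scores, threshold=0.25).sum())  # tensor(4.)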
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
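
    # Standalone sketch (illustrative, not part of this module) of the lazy
    # import pattern `_LazyModule` implements: a PEP 562 module-level
    # __getattr__ defers the heavy torch import until an attribute is first
    # accessed. The `_LAZY` table and names below are hypothetical.
    #
    #     import importlib
    #
    #     _LAZY = {"TrajectoryTransformerModel": ".modeling_trajectory_transformer"}
    #
    #     def __getattr__(name):
    #         if name in _LAZY:
    #             module = importlib.import_module(_LAZY[name], __package__)
    #             return getattr(module, name)
    #         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")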
| 687 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on a COCO sample image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 39 |
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 39 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)

        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 585 |
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
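
    # A quick self-contained check (illustrative, not part of the original
    # file): thresholding a tiny synthetic gradient leaves only pixels
    # brighter than the mean, so exactly two gray levels remain.
    demo = Image.new("L", (4, 4))
    demo.putdata(list(range(0, 256, 16)))  # 16 pixels, values 0..240, mean 120
    binarized = mean_threshold(demo)
    print(sorted(set(binarized.getdata())))  # [0, 255]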
| 585 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepares a list of PIL images, or a list of numpy arrays if numpify=True,
        or a list of PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 166 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to the n-th term, or to the smallest
    term for which c > 10^k, when the terms are written as a(i) = b * 10^k + c.
    Returns the accumulated difference and the number of terms jumped.
    """
    # ds_b - digitsum(b); c - the value formed by the lowest k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value of digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """
    Same as next_term(a_i, k, i, n) but computes terms sequentially, without
    memoizing the results.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """
    Adds addend to the digit array given in digits, starting at index k.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence a(i + 1) = a(i) + digitsum(a(i))
    with a(1) = 1.

    >>> solution(10)
    62
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F'{solution() = }')
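
    # Brute-force cross-check (illustrative, not in the original file): the
    # sequence is a(i + 1) = a(i) + digitsum(a(i)) with a(1) = 1, so small n
    # can be verified directly.
    def naive_solution(n: int) -> int:
        a = 1
        for _ in range(n - 1):
            a += sum(int(d) for d in str(a))
        return a

    assert solution(10) == naive_solution(10) == 62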
| 166 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
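
# Illustrative usage (the image path is hypothetical); the tool downloads the
# Donut DocVQA checkpoint on first use, and tools are callable objects:
#
#     from PIL import Image
#
#     tool = DocumentQuestionAnsweringTool()
#     document = Image.open("invoice.png")
#     print(tool(document, "What is the purchase amount?"))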
| 432 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 432 | 1 |
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """
    Returns the prime numbers below max_number (Sieve of Eratosthenes).

    >>> calculate_prime_numbers(10)
    [2, 3, 5, 7]
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """
    Returns the number of composite integers below max_number that have
    exactly two (not necessarily distinct) prime factors.

    >>> solution(30)
    10
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
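
# Brute-force cross-check (illustrative, not in the original file): count the
# products p * q < limit directly over prime pairs with p <= q.
def naive_count(limit: int) -> int:
    primes = calculate_prime_numbers(limit // 2)
    return sum(
        1
        for a in range(len(primes))
        for b in range(a, len(primes))
        if primes[a] * primes[b] < limit
    )


assert solution(30) == naive_count(30) == 10
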
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 32 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 309 | 0 |
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """
    Double Linked List Node built specifically for LRU Cache.
    """

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """
    Double Linked List built specifically for LRU Cache.
    """

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """
        Adds the given node at the tail of the list (before rear).
        """
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """
        Removes and returns the given node from the list.

        Returns None if node.prev or node.next is None.
        """
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """
    LRU Cache that stores up to a given capacity of key/value pairs.
    """

    # class variable that maps decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        """
        Return the details for the cache instance: [hits, misses, capacity, current size]
        """
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """
        Returns the value for the input key and updates the Double Linked List.
        Returns None if the key is not present in the cache.
        """
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """
        Sets the value for the input key and updates the Double Linked List.
        """
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1

        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """
        Decorator version of LRU Cache.
        """

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
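
# Illustrative use of the decorator form (not part of the original file):
# memoize a recursive Fibonacci and inspect the cache statistics.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


print(fib(20))  # 6765
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)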
if __name__ == "__main__":
import doctest
doctest.testmod() | 182 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
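
# Illustrative round trip (file names hypothetical): read newline-delimited
# JSON into a Dataset, then write it back out with the writer defined below.
#
#     ds = JsonDatasetReader("data.jsonl", split="train").read()
#     JsonDatasetWriter(ds, "copy.jsonl", lines=True).write()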
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle.

        Caller is responsible for opening and closing the handle.
        """
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 182 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model , hf_model , is_headless) -> None:
    """Copy every tensor of `fairseq_model` into `hf_model`, logging leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm) -> None:
    """Load one fairseq feature-extractor conv/norm tensor, keyed as 'conv_layers.<layer>.<type>.<param>'."""
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
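# The fairseq keys handled above follow 'conv_layers.<layer>.<type>.<param>'
# (illustrative examples): 'conv_layers.0.0.weight' is the conv weight of layer
# 0 (type 0), while 'conv_layers.0.2.weight' is that layer's group/layer norm
# weight (type 2).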
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False
) -> None:
    """Copy/paste/tweak a fairseq wav2vec2 checkpoint into the transformers design."""
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.id2label = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )

    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )

    model = model[0].eval()

    recursively_load_weights(model , hf_wavavec , not is_finetuned )

    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
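    # Example invocation (script name and paths are placeholders):
    #   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path ./wav2vec_small.pt \
    #       --pytorch_dump_folder_path ./wav2vec2-base \
    #       --not_finetuned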
| 1 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=64 ,embedding_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        """simple docstring"""
        return MegatronBertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def create_and_check_megatron_bert_model( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids)
        result = model(input_ids ,token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels ,next_sentence_label=sequence_labels ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,token_type_ids=multiple_choice_token_type_ids ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
# test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device)
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device)
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self ,config_class=MegatronBertConfig ,hidden_size=37)

    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_megatron_bert_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor( tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests( unittest.TestCase ):
    @slow
    @unittest.skip('''Model is not available.''')
    def test_inference_no_head( self ):
        """simple docstring"""
        directory = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['''MYDIR'''] ,directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape ,expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii ,jj ,a ,b)
                self.assertTrue(math.isclose(a ,b ,rel_tol=TOLERANCE ,abs_tol=TOLERANCE) ,msg=msg)
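        # Note on the check above: math.isclose(a, b, rel_tol=t, abs_tol=t) passes
        # when |a - b| <= max(t * max(|a|, |b|), t); TOLERANCE = 1e-4 leaves room
        # for the fp16 weights loaded via model.half().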
| 227 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector )
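# Quick numeric check (values follow directly from the definitions above):
#   sigmoid(np.array([0.0]))             -> array([0.5])
#   sigmoid_linear_unit(np.array([0.0])) -> array([0.])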
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
'''simple docstring'''
    def _get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(torch_device )

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds , negative_prompt_embeds = pipe.encode_prompt(prompt )

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )

        output = pipe(**inputs )[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )

        inputs = self.get_dummy_inputs(torch_device )

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs )[0]

        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1e-4 )
    def _test_save_load_local( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]

        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1e-4 )
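    # Both helpers above use the same round-trip pattern: run the pipeline, save
    # it with save_pretrained(), reload via from_pretrained(), rerun on identical
    # inputs, and require the max absolute output difference to stay below 1e-4.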
| 655 | 1 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_UpperCamelCase : Union[str, Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg , hint=None ):
    '''Check one runtime dependency on demand.'''
    require_version(deps[pkg] , hint )
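# Example ("tqdm" must appear in `deps`; the hint argument is optional):
#   dep_version_check("tqdm")  # raises if the installed tqdm violates deps["tqdm"]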
 | 599 |
"""
Project Euler Problem 30: sum all numbers that can be written as the sum of the
fifth powers of their digits.
"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    '''Sum of the fifth powers of the digits of `number`.'''
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution() -> int:
    '''Return the sum of all numbers equal to their digit-fifth-power sum.'''
    return sum(
        number
        for number in range(1000 , 100_0000 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
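    # Worked example: 4150 = 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0, so
    # digits_fifth_powers_sum(4150) == 4150 and 4150 is counted by solution().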
| 599 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 633 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb):
    '''Convert an RGB array to grayscale with the ITU-R 601 luma weights.'''
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def gray_to_binary(gray):
    '''Threshold a grayscale array into a boolean mask.'''
    return (gray > 127) & (gray <= 255)
def dilation(image, kernel):
    '''Morphological dilation of a binary image by the given structuring element.'''
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 633 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class MakeDuplicateClustersTest( TestCase ):
    def test_make_duplicate_clusters( self ):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset( self ):
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , True )
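    # The 0.85 passed to make_duplicate_clusters above is the MinHash Jaccard
    # similarity threshold: 'a ' * 20 and 'a ' * 30 share the same token set and
    # cluster together, while 'b ' * 7 stays out.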
| 621 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax( self ):
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloat16 , )

        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )

        params = replicate(params )
        prompt_ids = shard(prompt_ids )

        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )

        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=2_5 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def test_stable_diffusion_dpm_flax( self ):
        model_id = 'stabilityai/stable-diffusion-2'
        scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder='scheduler' )
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision='bf16' , dtype=jnp.bfloat16 , )
        params['scheduler'] = scheduler_params

        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )

        params = replicate(params )
        prompt_ids = shard(prompt_ids )

        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )

        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=2_5 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 621 | 1 |
def equation(x: float) -> float:
    """The function whose root we look for."""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """Find a root of `equation` in [a, b] by repeated interval halving."""
    # Bolzano theorem: a sign change at the ends is required for a root
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('''Wrong space!''' )

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
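    # Both calls bracket the positive root of 10 - x*x, so each converges on
    # sqrt(10) ≈ 3.1623 once the interval width drops below 0.01.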
| 711 |
def combination_util(arr, n, r, index, data, i):
    """Recursively fill `data` and print every r-combination of arr[0..n-1]."""
    if index == r:
        for j in range(r ):
            print(data[j] , end=''' ''' )
        print(''' ''' )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print all r-combinations of arr[0..n-1]."""
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
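    # For arr = [10, 20, 30, 40, 50] and r = 3 the output starts:
    #   10 20 30 / 10 20 40 / 10 20 50 / 10 30 40 / ... (C(5, 3) = 10 combinations)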
| 333 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
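# Example invocation (script name and paths are placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin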
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
A_ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
 | 57 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """simple docstring"""
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=4_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )

        output , past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past , output_from_past , rtol=1e-3 )
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''
    @cached_property
    def tokenizer( self ):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words

    def translate_src_text( self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation( self ):
        self._assert_generated_batch_equal_expected()
 | 57 | 1 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = 'sshleifer/bart-tiny-random'
TINY_T5 = 'patrickvonplaten/t5-tiny-random'
@require_torch
class MakeStudentTester ( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def teacher_config( self ):
        return AutoConfig.from_pretrained(TINY_BART )

    def test_valid_t5( self ):
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def test_asymmetric_t5( self ):
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )

    def test_same_decoder_small_encoder( self ):
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def test_small_enc_small_dec( self ):
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def test_raises_assert( self ):
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
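    # In the tests above, e/d are the student's encoder/decoder layer counts;
    # d=None keeps every teacher decoder layer, which is why
    # test_same_decoder_small_encoder compares against teacher_config (the tiny
    # BART teacher has matching encoder/decoder depths).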
| 608 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__magic_name__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 608 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = ["ConvNextFeatureExtractor"]
UpperCAmelCase_ : Any = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 491 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
UpperCAmelCase_ : List[Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( _A : Optional[Any]=None )-> Union[str, Any]:
"""simple docstring"""
if subparsers is not None:
A__ = subparsers.add_parser("tpu-config" , description=_description )
else:
A__ = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
A__ = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=_A , default=_A , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_A , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_A , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
A__ = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_A , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=_A )
return parser
def UpperCamelCase ( _A : Optional[Any] )-> Optional[int]:
"""simple docstring"""
A__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_A ):
A__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
A__ = defaults.command_file
if not args.command and defaults.commands is not None:
A__ = defaults.commands
if not args.tpu_name:
A__ = defaults.tpu_name
if not args.tpu_zone:
A__ = defaults.tpu_zone
if args.accelerate_version == "dev":
A__ = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
A__ = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , _A ):
A__ = f"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
A__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _A ):
A__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
A__ = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [f"""pip install {args.accelerate_version}"""]
new_cmd += args.command
A__ = "; ".join(_A )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
A__ = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"""Running {" ".join(_A )}""" )
return
subprocess.run(_A )
print("Successfully setup pod." )
def UpperCamelCase ( )-> Optional[int]:
"""simple docstring"""
A__ = tpu_command_parser()
A__ = parser.parse_args()
tpu_command_launcher(_A )
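
# --- usage sketch (not part of the original file): all flag values below are
# hypothetical. Running
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --install_accelerate --command "python train.py" --debug
# only prints the assembled command instead of executing it, roughly:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; pip install accelerate -U; python train.py" --worker all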
| 491 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the latents with the VQ-VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
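
# --- usage sketch (not part of the original file): a minimal way to drive the
# pipeline class above. The repo id "CompVis/ldm-celebahq-256" and its
# unet/vqvae/scheduler subfolder layout are assumptions for illustration, not
# taken from this file.
# from diffusers import DDIMScheduler, UNet2DModel, VQModel
#
# unet = UNet2DModel.from_pretrained("CompVis/ldm-celebahq-256", subfolder="unet")
# vqvae = VQModel.from_pretrained("CompVis/ldm-celebahq-256", subfolder="vqvae")
# scheduler = DDIMScheduler.from_pretrained("CompVis/ldm-celebahq-256", subfolder="scheduler")
#
# pipe = snake_case_(vqvae=vqvae, unet=unet, scheduler=scheduler)  # the class defined above
# image = pipe(batch_size=1, num_inference_steps=50).images[0]
# image.save("ldm_sample.png")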
| 554 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        config["num_processes"] = 1
        config["use_cpu"] = True
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
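
# --- usage sketch (not part of the original file): the helper above can also be
# called directly from Python; the save path below is illustrative.
# config_path = write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_default_config.json")
# if config_path:
#     print(f"wrote {config_path}")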
| 554 | 1 |
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
| 458 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class UpperCamelCase__(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 458 | 1 |
'''simple docstring'''
# Count the ways to make `x` pence from UK coin denominations, one denomination at a time.
def one_pence():
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
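
# --- illustrative alternative (not part of the original file): the same count can be
# computed iteratively with the classic coin-change DP in O(len(coins) * target) time.
def count_combinations(target: int = 200, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    ways = [1] + [0] * target  # ways[0] = 1: one way to make zero pence
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]

# count_combinations(200) == solution(200) == 73682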
| 713 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 647 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_lowercase : Optional[int] = parser.parse_args()
_lowercase : Optional[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
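
# --- usage sketch (not part of the original file): an illustrative invocation; the
# script name and all paths below are hypothetical.
# python convert_wav2vec2_checkpoint.py \
#     --checkpoint_path ./wav2vec_small_960h.pt \
#     --dict_path ./dict.ltr.txt \
#     --pytorch_dump_folder_path ./wav2vec2-base-960h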
| 210 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 410 | 0 |
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    graph_1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    graph_2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    graph_3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    graph_4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    graph_5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(graph_1, max_node)
    check_euler(graph_2, max_node)
    check_euler(graph_3, max_node)
    check_euler(graph_4, max_node)
    check_euler(graph_5, max_node)


if __name__ == "__main__":
    main()
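
# --- note (not part of the original file): check_circuit_or_path encodes the classic
# Euler theorem for connected graphs: 0 odd-degree vertices -> Euler circuit (case 1),
# exactly 2 -> Euler path starting at an odd vertex (case 2), anything else -> neither
# (case 3). For graph_1 above the degrees are {1: 3, 2: 2, 3: 2, 4: 2, 5: 1}, i.e.
# two odd vertices, so main() reports an Euler path for it.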
| 624 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
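
# --- usage sketch (not part of the original file): the same pipeline the slow test
# exercises can be used directly; the model id and the output keys below are exactly
# the ones asserted above.
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# out["depth"].save("depth.png")        # PIL.Image visualization
# print(out["predicted_depth"].shape)   # raw torch.Tensor depth map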
| 624 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 578 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 387 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        # create estimator and run training
        estimator = self.create_estimator()
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 711 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
"""simple docstring"""
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
__UpperCAmelCase : Optional[int] = mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
__UpperCAmelCase : Any = max(
mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , j - wt[i - 1] ) + val[i - 1] , )
__UpperCAmelCase : str = val
return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0-1 knapsack: returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
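# Quick illustrative check of the bottom-up solver (same instance as __main__ below;
# items 3 and 4 together weigh 5 and are worth 8):
#
#     >>> knapsack(6, [4, 3, 2, 3], [3, 2, 4, 4], 4)[0]
#     8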
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solves the integer-weights knapsack problem and also returns one of the
    possibly several optimal subsets of items.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Recursively reconstructs one optimal subset by walking the DP table backwards from (i, j)."""
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
A = [3, 2, 4, 4]
A = [4, 3, 2, 3]
A = 4
A = 6
A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
A , A = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
A , A = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 487 | 0 |
"""Tests that an optimizer wrapped by `accelerator.prepare` can still be pickled."""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)

        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)

        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")

        AcceleratorState._reset_state()
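# For comparison, a plain optimizer round-trips through pickle the same way; the
# test above asserts that `accelerator.prepare` preserves this property (sketch only):
#
#     opt = torch.optim.SGD(torch.nn.Linear(2, 2).parameters(), lr=0.1)
#     opt_copy = pickle.loads(pickle.dumps(opt))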
| 379 |
"""Dataset utilities for the MM-IMDB multimodal (image + text) classification example."""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
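# Shape sketch for the encoder above, e.g. with num_image_embeds = 4 (pool 2x2):
#   Bx3x224x224 -> resnet trunk -> Bx2048x7x7 -> pool -> Bx2048x2x2
#   -> flatten(start_dim=2) -> Bx2048x4 -> transpose -> Bx4x2048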
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        # mark every genre listed for this example as a positive class
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
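# A minimal DataLoader hookup for the pieces above (the tokenizer and jsonl path are
# placeholders, not shipped with this file):
#
#     loader = torch.utils.data.DataLoader(
#         JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), 512),
#         batch_size=8,
#         collate_fn=collate_fn,
#     )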
def get_mmimdb_labels():
    """Returns the MM-IMDB genre labels, in canonical order."""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    """Standard ImageNet-style preprocessing used by the ResNet image encoder."""
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 379 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
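    # Sketch of how the common ModelTesterMixin consumes the helper above (illustrative only):
    #
    #     config, inputs_dict = tester.prepare_config_and_inputs_for_common()
    #     model = CTRLModel(config).eval()
    #     outputs = model(**inputs_dict)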
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 5 |
def binary_xor(a: int, b: int) -> str:
    """Returns the binary XOR of two non-negative integers as a '0b'-prefixed string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
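# Example run (25 = 0b011001, 32 = 0b100000, so 25 ^ 32 = 0b111001):
#
#     >>> binary_xor(25, 32)
#     '0b111001'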
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 1 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__SCREAMING_SNAKE_CASE = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__SCREAMING_SNAKE_CASE = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__SCREAMING_SNAKE_CASE = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]} | 553 |
"""simple docstring"""
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
lowerCAmelCase :Tuple = len(a__ )
print('The following activities are selected:' )
# The first activity is always selected
lowerCAmelCase :Dict = 0
print(a__ , end=',' )
# Consider rest of the activities
for j in range(a__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(a__ , end=',' )
lowerCAmelCase :List[str] = j
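# Expected behaviour on the sample data used below (greedy by earliest finish time):
#
#     >>> print_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])
#     The following activities are selected:
#     0,1,3,4,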
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 553 | 1 |
"""Tests for the Perceiver (byte-level) tokenizer."""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
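    # Note: PerceiverTokenizer is byte-level with 6 leading special tokens, so a
    # printable ASCII character c maps to ord(c) + 6 (e.g. "A" -> 71, " " -> 38),
    # which is why the expected id lists in the tests below look the way they do.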
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 701 |
"""Chinese remainder theorem via the extended Euclidean algorithm."""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple:
    """Extended Euclid's algorithm: returns (x, y) with a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Returns n such that n % n1 == r1 and n % n2 == r2 (n1, n2 coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Returns b such that (a * b) % n == 1."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
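# Worked example: find n with n % 5 == 1 and n % 7 == 2.
# extended_euclid(5, 7) == (3, -2) since 5*3 + 7*(-2) == 1, so
#
#     >>> chinese_remainder_theorem(5, 1, 7, 2)
#     16
#
# and indeed 16 % 5 == 1 and 16 % 7 == 2.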
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
| 340 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 654 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    """
    Constructs a Jukebox tokenizer. Jukebox conditions on three inputs (artists,
    genres and lyrics), each with its own vocabulary.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
    def get_vocab(self):
        # the original `dict(a, b, c)` form was invalid Python; a mapping of the
        # three encoders is the assumed intent
        return {
            "artists_encoder": self.artists_encoder,
            "genres_encoder": self.genres_encoder,
            "lyrics_encoder": self.lyrics_encoder,
        }
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
for genres in range(len(UpperCamelCase__)):
snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        # Only the lyrics are split into characters for the character-based vocabulary.
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input text. This process is for the genres and the artist."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Convert the raw string to a list of token ids."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
| 654 | 1 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector = vector . vector."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier that optimizes Wolfe's dual problem with scipy."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    # kernels
    def __linear(self, vectora: ndarray, vectorb: ndarray) -> float:
        """Linear kernel (as if no kernel was used at all)."""
        return np.dot(vectora, vectorb)

    def __rbf(self, vectora: ndarray, vectorb: ndarray) -> float:
        """RBF (radial basis function) kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #   and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the function to maximize."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Get the expected class of an observation."""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
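    # Added usage sketch: fit the classifier above on a tiny, linearly separable
    # dataset (illustrative values, not from the original module) and classify a
    # new point.
    xs = [np.asarray([1.0, 1.0]), np.asarray([2.0, 2.0]), np.asarray([-1.0, -1.0]), np.asarray([-2.0, -2.0])]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC(kernel="linear", regularization=10.0)
    svc.fit(observations=xs, classes=ys)
    print(svc.predict(np.asarray([1.5, 1.5])))  # expected: 1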
| 721 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
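
# Round-trip smoke test (added, illustrative): reload the checkpoint saved above
# and run one generation step on the randomly initialized tiny model.
reloaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
reloaded_batch = reloaded_tokenizer(["Reload test"], return_tensors="pt")
print("reload test output ids:", reloaded_model.generate(**reloaded_batch, max_length=8))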
| 35 | 0 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
" removed, as the behaviour will default to that of reshape_last_stage = True." , _lowercase , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
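
# Added usage sketch: build a default config and query the ONNX export metadata
# defined above; no pretrained weights are required.
if __name__ == "__main__":
    onnx_config = SegformerOnnxConfig(SegformerConfig())
    print(dict(onnx_config.inputs))  # pixel_values axes
    print(onnx_config.atol_for_validation, onnx_config.default_onnx_opset)  # 0.0001 12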
| 449 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_token_id = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_token_id)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_token_id)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
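
# Added usage sketch (network required to fetch "roberta-base"; output illustrative):
# offsets from the fast tokenizer mirror the assertions in the last test above.
if __name__ == "__main__":
    tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True, trim_offsets=True)
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    print(enc["offset_mapping"])  # [(0, 5), (6, 11)]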
| 449 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
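
# Added usage sketch (requires torch): run a randomly initialized, deliberately tiny
# ALBERT forward pass without downloading weights; the sizes below are assumptions.
if __name__ == "__main__":
    tiny_config = AlbertConfig(
        vocab_size=99, embedding_size=16, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    tiny_model = AlbertModel(tiny_config)
    tiny_out = tiny_model(torch.tensor([[1, 2, 3]]))
    print(tiny_out.last_hidden_state.shape)  # torch.Size([1, 3, 32])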
| 712 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown()) | 381 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 98 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem with backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem for a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
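    # Added usage sketch: each cell of the returned board holds the 1-based step at
    # which the knight visits it; an open tour exists on a 5x5 board.
    for row in open_knight_tour(5):
        print(row)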
| 43 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """
    Convert a string representation of truth to `True` (1) or `False` (0).
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax tensor or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """
    Tests if `x` is a tensorflow symbolic tensor or not (i.e. not eager). Safe to call even if tensorflow is
    not installed.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
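
# Added usage sketch: nested containers of framework tensors collapse to plain
# Python / numpy objects.
if __name__ == "__main__":
    print(to_py_obj({"a": np.array([1, 2]), "b": [np.float32(3.5)]}))  # {'a': [1, 2], 'b': [3.5]}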
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice
    (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
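
# Added usage sketch: a minimal ModelOutput subclass behaves like a dict, a tuple
# and an object with attributes at the same time.
if __name__ == "__main__":
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class ToyOutput(ModelOutput):
        logits: Optional[np.ndarray] = None
        loss: Optional[np.ndarray] = None

    toy = ToyOutput(logits=np.zeros(2))
    print(toy["logits"].shape, toy.to_tuple())  # (2,) (array([0., 0.]),)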
class ExplicitEnum(str, Enum):
    """
    Enum with more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """
    Check if a given model can return loss.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def __lowerCamelCase ( _lowercase , _lowercase = "" , _lowercase = "." ) -> Union[str, Any]:
def _flatten_dict(_lowercase , _lowercase="" , _lowercase="." ):
for k, v in d.items():
UpperCAmelCase : str = str(__snake_case ) + delimiter + str(__snake_case ) if parent_key else k
if v and isinstance(__snake_case , __snake_case ):
yield from flatten_dict(__snake_case , __snake_case , delimiter=__snake_case ).items()
else:
yield key, v
return dict(_flatten_dict(__snake_case , __snake_case , __snake_case ) )
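
# Added usage sketch: nested mappings collapse into delimiter-joined keys.
if __name__ == "__main__":
    print(flatten_dict({"a": 1, "c": {"a": 2, "b": {"x": 5, "y": 10}}}))
    # {'a': 1, 'c.a': 2, 'c.b.x': 5, 'c.b.y': 10}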
@contextmanager
def working_or_temp_dir( working_dir , use_temp_dir = False ) -> List[Any]:
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose( array , axes=None ) -> List[Any]:
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F'''Type not supported for transpose: {type(array )}.''' )
def reshape( array , newshape ) -> List[str]:
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F'''Type not supported for reshape: {type(array )}.''' )
def squeeze( array , axis=None ) -> Union[str, Any]:
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F'''Type not supported for squeeze: {type(array )}.''' )
def expand_dims( array , axis ) -> int:
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F'''Type not supported for expand_dims: {type(array )}.''' )
def tensor_size( array ) -> Optional[int]:
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F'''Type not supported for tensor_size: {type(array )}.''' )
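# Editor's note (illustrative comment): transpose/reshape/squeeze/expand_dims/
# tensor_size all share one dispatch pattern -- detect the tensor's framework,
# then call the matching backend op. Assuming numpy inputs:
#   squeeze(np.ones((1, 3, 1)))    # -> shape (3,)
#   expand_dims(np.ones(3), 0)     # -> shape (1, 3)
# The same calls work unchanged for torch, TensorFlow and JAX tensors.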
def add_model_info_to_auto_map( auto_map , repo_id ) -> Optional[Any]:
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F'''{repo_id}--{value}'''
    return auto_map
def infer_framework( model_class ) -> Dict:
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("""torch""" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F'''Could not infer framework from class {model_class}.''' )
| 707 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
SPIECE_UNDERLINE = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ) -> List[Any]:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
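# Editor's usage sketch (hypothetical checkpoint; upstream class name is
# BarthezTokenizerFast):
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   tok.build_inputs_with_special_tokens([10, 11], [20, 21])
#   -> [cls, 10, 11, sep, sep, 20, 21, sep]   # i.e. "<s> A </s></s> B </s>"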
| 672 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_chinese_clip'] = ['ChineseCLIPFeatureExtractor']
    _import_structure['image_processing_chinese_clip'] = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_chinese_clip'] = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
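# Editor's note (sketch): with _LazyModule, importing this package is cheap --
# e.g. `from transformers.models.chinese_clip import ChineseCLIPModel` only
# triggers the heavy torch-dependent import on first attribute access.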
| 140 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Dict = logging.get_logger(__name__)
def create_rename_keys( config ,base_model=False ):
"""simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict ,config ,base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
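# Editor's worked example (hypothetical numbers): for hidden_size = 768 the fused
# timm qkv weight has shape (2304, 768); rows [0:768] become the query projection,
# rows [768:1536] the key projection, and rows [-768:] the value projection.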
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key( dct ,old ,new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name ,pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny" ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small" ):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small" ):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base" ):
            pass
        elif vit_name[4:].startswith("large" ):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge" ):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name ,pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config ,base_model )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config ,base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() ,return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output ,outputs.pooler_output ,atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits ,outputs.logits ,atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
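# Editor's usage sketch (hypothetical script name and paths):
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224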
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 86 | 0 |
def is_unique( input_str: str ) -> bool:
    '''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2, ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
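# Editor's worked example: for "abca" the bits for 'a' (97), 'b' (98) and 'c' (99)
# are switched on one by one; at the second 'a', (bitmap >> 97) & 1 is already 1,
# so the function returns False. is_unique("abc") -> True, is_unique("abca") -> False.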
if __name__ == "__main__":
import doctest
doctest.testmod()
| 591 |
from __future__ import annotations
def print_distance( distance: list[float], src: int ):
    '''simple docstring'''
    print(F"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(F"""{i}\t\t{d}""" )
def check_negative_cycle( graph: list[dict[str, int]], distance: list[float], edge_count: int ):
    '''simple docstring'''
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ('''src''', '''dst''', '''weight'''))
        if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford( graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int ):
    '''simple docstring'''
    distance = [float('''inf''' )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ('''src''', '''dst''', '''weight'''))
            if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count )
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''' )
    return distance
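# Editor's usage sketch: each edge is a dict with "src", "dst" and "weight" keys:
#   graph = [
#       {"src": 0, "dst": 1, "weight": 4},
#       {"src": 0, "dst": 2, "weight": 1},
#       {"src": 2, "dst": 1, "weight": 1},
#   ]
#   bellman_ford(graph, vertex_count=3, edge_count=3, src=0)  # -> [0.0, 2.0, 1.0]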
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    V = int(input("""Enter number of vertices: """).strip())
    E = int(input("""Enter number of edges: """).strip())
    graph = [{} for _ in range(E)]
    for i in range(E):
        print("""Edge """, i + 1)
        src, dest, weight = (
            int(x)
            for x in input("""Enter source, destination, weight: """).strip().split(""" """)
        )
        graph[i] = {"""src""": src, """dst""": dest, """weight""": weight}
    source = int(input("""\nEnter shortest path source:""").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 591 | 1 |
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
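# Editor's worked example: for the 3x3 matrix
#   131 673 234
#   201  96 342
#   630 803 746
# the cheapest left-to-right path (moving up, down and right) is 201 -> 96 -> 342
# with sum 639, which is exactly what the column-by-column DP above computes.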
if __name__ == "__main__":
print(f"""{solution() = }""")
| 173 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray , labels: np.ndarray , classes: int) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray , labels: np.ndarray , classes: int) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean) , (column_reshape(data_mean) - column_reshape(general_data_mean)).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean) , (column_reshape(data_mean) - column_reshape(general_data_mean)).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray , dimensions: int) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data , centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray , labels: np.ndarray , classes: int , dimensions: int) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes) , covariance_within_classes(features , labels , classes) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions)
        if isinstance(projected_data , np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features , dimensions)
        if not np.allclose(expected_output , output):
            raise AssertionError
    assert error_info.type is AssertionError
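# Editor's usage sketch: rows are features and columns are samples, so
#   features = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])   # 2 features x 3 samples
#   principal_component_analysis(features, 1)                 # -> 1 x 3 projection
# reduces the two correlated features to a single principal component.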
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : List[Any] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
    model_type = '''gpt_neox'''
    def __init__( self , vocab_size=50_432 , hidden_size=6_144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24_576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10_000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2_048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ) -> Union[str, Any]:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation( self ) -> Dict:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
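# Editor's usage sketch (the class above is GPTNeoXConfig in upstream transformers):
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
# "type" must be "linear" or "dynamic", and "factor" a float strictly greater than 1.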
| 356 |
from collections.abc import Callable
def bisection( function: Callable[[float], float] , a: float , b: float ) -> float:
    start = a
    end = b
    if function(a ) == 0: # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ): # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval." )
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid ) > 1_0**-7: # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f( x: float ) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
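# Editor's usage sketch: any continuous function with a sign change on [a, b] works,
# e.g. bisection(lambda x: x**2 - 4, 1, 3) converges to ~2.0 within the 1e-7 tolerance.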
| 356 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1_0_2_4,
}
class lowerCamelCase ( __lowerCamelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MvpTokenizer
def __init__( self :int , lowercase :Dict=None , lowercase :Any=None , lowercase :Optional[Any]=None , lowercase :List[str]="replace" , lowercase :Any="<s>" , lowercase :Dict="</s>" , lowercase :Union[str, Any]="</s>" , lowercase :int="<s>" , lowercase :Union[str, Any]="<unk>" , lowercase :List[Any]="<pad>" , lowercase :Optional[Any]="<mask>" , lowercase :Any=False , lowercase :int=True , **lowercase :Dict , ) -> Any:
"""simple docstring"""
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowercase ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(lowercase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**lowercase )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = '''post_processor'''
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE = False
if state.get('''add_prefix_space''' , lowercase ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('''trim_offsets''' , lowercase ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(lowercase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
def snake_case__ ( self :str ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case__ ( self :List[str] , lowercase :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
SCREAMING_SNAKE_CASE = value
def snake_case__ ( self :Union[str, Any] , *lowercase :Optional[int] , **lowercase :str ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('''is_split_into_words''' , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*lowercase , **lowercase )
def snake_case__ ( self :Any , *lowercase :List[Any] , **lowercase :Optional[int] ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('''is_split_into_words''' , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*lowercase , **lowercase )
def snake_case__ ( self :List[Any] , lowercase :str , lowercase :Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def snake_case__ ( self :Optional[Any] , lowercase :List[Any] , lowercase :Optional[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case__ ( self :List[str] , lowercase :List[int] , lowercase :Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 201 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase ( __lowerCamelCase ):
def __init__( self :int , lowercase :Optional[Any] , lowercase :Optional[int]=1_3 , lowercase :Any=7 , lowercase :Tuple=True , lowercase :Optional[int]=True , lowercase :Any=False , lowercase :Any=True , lowercase :Dict=9_9 , lowercase :Dict=3_2 , lowercase :Any=5 , lowercase :Optional[Any]=4 , lowercase :List[str]=6_4 , lowercase :Optional[int]="gelu" , lowercase :int=0.1 , lowercase :str=0.1 , lowercase :List[str]=5_1_2 , lowercase :int=1_6 , lowercase :Any=2 , lowercase :Union[str, Any]=0.02 , lowercase :Optional[int]=3 , lowercase :Optional[Any]=4 , lowercase :Tuple=None , lowercase :int=2 , lowercase :Tuple=2 , lowercase :List[Any]=2 , lowercase :Optional[int]=2 , lowercase :Tuple=4 , lowercase :int=1 , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = q_groups
SCREAMING_SNAKE_CASE = k_groups
SCREAMING_SNAKE_CASE = v_groups
SCREAMING_SNAKE_CASE = post_attention_groups
SCREAMING_SNAKE_CASE = intermediate_groups
SCREAMING_SNAKE_CASE = output_groups
def snake_case__ ( self :str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self :str ) -> Dict:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def snake_case__ ( self :Optional[Any] , lowercase :Optional[Any] , lowercase :int , lowercase :Any , lowercase :List[str] , lowercase :Optional[Any] , lowercase :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertModel(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , lowercase )
SCREAMING_SNAKE_CASE = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self :Dict , lowercase :Dict , lowercase :List[Any] , lowercase :str , lowercase :Union[str, Any] , lowercase :Dict , lowercase :List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self :List[str] , lowercase :Optional[Any] , lowercase :Optional[int] , lowercase :str , lowercase :int , lowercase :Optional[Any] , lowercase :int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(
lowercase , attention_mask=lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self :Any , lowercase :Optional[Any] , lowercase :List[str] , lowercase :int , lowercase :Any , lowercase :Optional[int] , lowercase :List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SqueezeBertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self :str , lowercase :List[Any] , lowercase :List[str] , lowercase :Optional[int] , lowercase :Tuple , lowercase :Tuple , lowercase :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SqueezeBertForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self :int , lowercase :List[str] , lowercase :List[Any] , lowercase :Tuple , lowercase :str , lowercase :Optional[Any] , lowercase :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = SqueezeBertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
lowercase , attention_mask=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self :List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs
SCREAMING_SNAKE_CASE = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
UpperCamelCase_ : int = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ : int = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : int = True
UpperCamelCase_ : List[Any] = False
def snake_case__ ( self :Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowercase , dim=3_7 )
def snake_case__ ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowercase )
def snake_case__ ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase )
def snake_case__ ( self :List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase )
def snake_case__ ( self :Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase )
def snake_case__ ( self :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase )
def snake_case__ ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase )
@slow
def snake_case__ ( self :Dict ) -> str:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SqueezeBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def snake_case__ ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
SCREAMING_SNAKE_CASE = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
SCREAMING_SNAKE_CASE = model(lowercase )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 3) )
self.assertEqual(output.shape , lowercase )
SCREAMING_SNAKE_CASE = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
        self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-4 ) )
| 201 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = """docs/source/en/_toctree.yml"""
def clean_doc_toc( doc_list ) -> Union[str, Any]:
    """simple docstring"""
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
    new_doc = sorted(new_doc, key=lambda s : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
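# Editor's worked example: given
#   [{"local": "overview", "title": "Overview"},
#    {"local": "ddim", "title": "DDIM"},
#    {"local": "ddim", "title": "DDIM"}]
# clean_doc_toc dedups the repeated "ddim" entry, sorts the remaining entries by
# title and puts the overview entry first.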
def check_scheduler_doc( overwrite=False ) -> List[str]:
    """simple docstring"""
    with open(PATH_TO_TOC, encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['''sections''']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['''sections'''] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC, '''w''', encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                '''The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def check_pipeline_doc( overwrite=False ) -> List[Any]:
    """simple docstring"""
    with open(PATH_TO_TOC, encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['''sections''']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['''section''']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['''section'''] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['''sections'''] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC, '''w''', encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                '''The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 78 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
_a = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
_a = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
_a = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
_a = F"""down_blocks.{i}.resnets.{j}."""
_a = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
_a = F"""down_blocks.{i}.attentions.{j}."""
_a = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
_a = F"""up_blocks.{i}.resnets.{j}."""
_a = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
_a = F"""up_blocks.{i}.attentions.{j}."""
_a = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
_a = F"""down_blocks.{i}.downsamplers.0.conv."""
_a = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
_a = F"""up_blocks.{i}.upsamplers.0."""
_a = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
_a = """mid_block.attentions.0."""
_a = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
_a = F"""mid_block.resnets.{j}."""
_a = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict( unet_state_dict ) -> str:
    """simple docstring"""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
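# Editor's worked example: after the renames above, the HF Diffusers key
# "time_embedding.linear_1.weight" maps back to the original stable-diffusion key
# "time_embed.0.weight", and its tensor is copied over unchanged.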
# ================#
# VAE Conversion #
# ================#
_a = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
_a = F"""encoder.down_blocks.{i}.resnets.{j}."""
_a = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
_a = F"""down_blocks.{i}.downsamplers.0."""
_a = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
_a = F"""up_blocks.{i}.upsamplers.0."""
_a = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
_a = F"""decoder.up_blocks.{i}.resnets.{j}."""
_a = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
_a = F"""mid_block.resnets.{i}."""
_a = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
_a = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd( w ) -> List[str]:
    """simple docstring"""
    return w.reshape(*w.shape, 1, 1 )
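# Editor's note: the original SD checkpoint stores the mid-block attention
# projections as 1x1 convolutions, so e.g. a (512, 512) linear weight becomes
# a (512, 512, 1, 1) conv weight.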
def convert_vae_state_dict( vae_state_dict ) -> Optional[Any]:
    """simple docstring"""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['''q''', '''k''', '''v''', '''proj_out''']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F'''mid.attn_1.{weight_name}.weight''' in k:
                print(F'''Reshaping {k} for SD format''' )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
_a = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"""q""": 0, """k""": 1, """v""": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
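# Minimal sketch (added; toy 4-dim tensors instead of the real hidden size): the loops
# above stitch the separate q/k/v projections back into the single fused `in_proj`
# tensor that the original OpenCLIP text-encoder checkpoint format expects.
def _demo_qkv_merge():
    hidden = 4
    q, k, v = torch.randn(hidden, hidden), torch.randn(hidden, hidden), torch.randn(hidden, hidden)
    in_proj_weight = torch.cat([q, k, v])
    assert in_proj_weight.shape == (3 * hidden, hidden)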
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 78 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 3_00_00)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 3_00_00)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 12_89])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
    def test_tokenizer_integration(self):
# fmt: off
__SCREAMING_SNAKE_CASE = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
| 693 |
def sylvester(number: int) -> int:
    """Return the n-th number in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
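# Added sanity check: the recurrence implemented above is a(n) = a(n-1)^2 - a(n-1) + 1,
# producing 2, 3, 7, 43, 1807, ...
def _first_terms(count: int = 5) -> list:
    return [sylvester(i) for i in range(1, count + 1)]  # [2, 3, 7, 43, 1807]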
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 693 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)
        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 207 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
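# Background note (added; not transformers code): the same deferred-import behaviour can
# be sketched by hand with PEP 562's module-level __getattr__ — heavy submodules are
# imported only when one of their attributes is first requested, e.g.:
#
#   def __getattr__(name):
#       if name in _import_structure["modeling_informer"]:
#           from . import modeling_informer
#           return getattr(modeling_informer, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")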
| 207 | 1 |
'''simple docstring'''
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 161 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 539 | 0 |
from importlib import import_module
from .logging import get_logger
__snake_case = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the module's components as attributes of this object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
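# Usage sketch (added; `some_module` and `_mock_join` are illustrative names, not part of
# this file). Inside the `with` block every global in `some_module` that *is* `os` or
# `os.path` is shadowed by a _PatchedModuleObj whose `join` points at the replacement;
# on exit the original bindings are restored.
#
#   def _mock_join(*parts):
#       return "/".join(parts)
#
#   with patch_submodule(some_module, "os.path.join", _mock_join):
#       assert some_module.os.path.join("a", "b") == "a/b"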
| 181 |
import math
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n naturals."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
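# Worked example (added): for n = 10 the sum of squares is 385 while the square of the
# sum is 55**2 = 3025, so the difference is 2640.
def _check_small_case() -> None:
    assert solution(10) == 2640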
if __name__ == "__main__":
print(F'''{solution() = }''')
| 181 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _A(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
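# Usage sketch (added; `_A` is the processor class defined above and the image is a
# random dummy — a real processor would normally be loaded with `from_pretrained`).
# The pipeline resizes the short edge to 224, center-crops to 224x224, rescales to
# [0, 1] and normalizes with the OpenAI CLIP statistics.
def _demo_preprocess():
    processor = _A()
    dummy = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
    batch = processor.preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)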
| 36 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluates the functional correctness of a completion by running it in a subprocess."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """Disables destructive functionality before untrusted code is executed."""
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
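# End-to-end sketch (added; illustrative task id and program). check_correctness runs the
# candidate program in a sandboxed child process — with reliability_guard applied inside
# that process by unsafe_execute — and returns the record the pass@k metric aggregates.
def _demo_check_correctness():
    record = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
    print(record)  # {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}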
| 132 | 0 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 705 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 10_01)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_01)

    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2_89, 50, 14, 1_74, 3_86], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'], )
@slow
    def test_tokenizer_integration(self):
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = {'''input_ids''': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'
    french_text = 'C\'est trop cool'
    spanish_text = 'Esto es genial'

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['it'], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['de'], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 1_00_00)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 250 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
def __init__( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any=13 , UpperCamelCase__ : Union[str, Any]=7 , UpperCamelCase__ : int=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=99 , UpperCamelCase__ : List[str]=32 , UpperCamelCase__ : int=2 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : List[str]=37 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=5_12 , UpperCamelCase__ : Optional[int]=16 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Optional[Any]=None , ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 99
UpperCAmelCase = 32
UpperCAmelCase = 2
UpperCAmelCase = 4
UpperCAmelCase = 37
UpperCAmelCase = "gelu"
UpperCAmelCase = 0.1
UpperCAmelCase = 0.1
UpperCAmelCase = 5_12
UpperCAmelCase = 16
UpperCAmelCase = 2
UpperCAmelCase = 0.02
UpperCAmelCase = 3
UpperCAmelCase = 4
UpperCAmelCase = None
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = TFRoFormerModel(config=__a )
UpperCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase = [input_ids, input_mask]
UpperCAmelCase = model(__a )
UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = True
UpperCAmelCase = TFRoFormerForCausalLM(config=__a )
UpperCAmelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase = model(__a )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase = TFRoFormerForMaskedLM(config=__a )
UpperCAmelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFRoFormerForSequenceClassification(config=__a )
UpperCAmelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = self.num_choices
UpperCAmelCase = TFRoFormerForMultipleChoice(config=__a )
UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFRoFormerForTokenClassification(config=__a )
UpperCAmelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase = TFRoFormerForQuestionAnswering(config=__a )
UpperCAmelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__a )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
self.assertIsNotNone(__a )
@require_tf
class __magic_name__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase = model(__a )[0]
# TODO Replace vocab size
UpperCAmelCase = 5_00_00
UpperCAmelCase = [1, 6, vocab_size]
self.assertEqual(output.shape , __a )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCAmelCase = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
@require_tf
class __magic_name__ ( unittest.TestCase ):
    tolerance = 1e-4
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = tf.constant([[4, 10]] )
UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCAmelCase = emba(input_ids.shape )
UpperCAmelCase = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(__a , __a , atol=self.tolerance )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
UpperCAmelCase = emba.weight[:3, :5]
tf.debugging.assert_near(__a , __a , atol=self.tolerance )
@require_tf
class __magic_name__ ( unittest.TestCase ):
    tolerance = 1e-4
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
UpperCAmelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
UpperCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
UpperCAmelCase = embed_positions([2, 16, 7_68] )[None, None, :, :]
UpperCAmelCase , UpperCAmelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__a , __a , __a )
UpperCAmelCase = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
UpperCAmelCase = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __a , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __a , atol=self.tolerance )
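        # Hedged reference (ours): apply_rotary_position_embeddings realizes the
        # RoFormer rotation on each (even, odd) feature pair; for a pair (x1, x2)
        # rotated by angle t it computes
        #     x1' = x1 * cos(t) - x2 * sin(t)
        #     x2' = x2 * cos(t) + x1 * sin(t)
        # Because key_layer above is defined as the negation of query_layer and the
        # rotation is linear, the expected key tensor is exactly the negated
        # expected query tensor.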
| 323 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> np.array:
"""simple docstring"""
_UpperCamelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCamelCase = np.zeros((n + 1,) )
_UpperCamelCase = ya
_UpperCamelCase = xa
for k in range(__snake_case ):
_UpperCamelCase = y[k] + step_size * ode_func(__snake_case, y[k] )
_UpperCamelCase = y[k] + (
(step_size / 2) * (ode_func(__snake_case, y[k] ) + ode_func(x + step_size, __snake_case ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
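    # Hedged usage sketch (ours): integrate y' = y from x = 0 to 1 with y(0) = 1;
    # the exact value at x = 1 is e ≈ 2.71828, so a small step size should land
    # close to it.
    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approx[-1])  # expected to be close to 2.7183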
| 19 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor for CLAP: turns raw audio into (fused) log-mel spectrograms."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ) -> None:
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames) -> np.array:
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
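# Hedged usage sketch (ours; the silent 12 s clip is illustrative only):
#
#     import numpy as np
#     extractor = ClapFeatureExtractor()
#     audio = np.zeros(48_000 * 12)          # 12 s at 48 kHz, longer than max_length_s=10
#     batch = extractor(audio, sampling_rate=48_000, return_tensors="np")
#     print(batch["input_features"].shape)   # fusion mode stacks 4 mel views per clip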
| 114 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
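# Hedged usage note (ours): with this lazy-module pattern a bare
# `import transformers.models.nllb_moe` stays cheap, and the torch-backed classes
# listed in _import_structure are only materialized on first attribute access:
#
#     from transformers.models.nllb_moe import NllbMoeConfig  # triggers the real import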
| 114 | 1 |
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
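    # Hedged aside (ours): since this is a pure im2col + dot-product convolution,
    # gaussian_filter can also be sanity-checked without an image file:
    #
    #     from numpy import ones
    #     flat = (ones((8, 8)) * 128).astype(uint8)
    #     print(gaussian_filter(flat, 3, sigma=1))  # constant output, since the input is flat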
| 410 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output of the semantic Stable Diffusion pipeline: the generated images plus
    per-image NSFW flags (None if safety checking was disabled)."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
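# Hedged usage sketch (ours): BaseOutput subclasses behave both like dataclasses and
# like ordered dicts, so attribute and key access are interchangeable:
#
#     out = SemanticStableDiffusionPipelineOutput(images=[image], nsfw_content_detected=[False])
#     assert out.images is out["images"]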
| 652 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether ``cp`` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
def is_chinese(word: str):
    """Returns 1 if every character in ``word`` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """Collects the multi-character, fully-Chinese tokens from a segmented sentence."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefixes the non-initial characters of known whole words with '##'."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for token_id in input_ids:
            token = bert_tokenizer._convert_id_to_token(token_id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )

    args = parser.parse_args()
    main(args)
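    # Hedged illustration (ours): add_sub_symbol marks the non-initial characters of
    # whole words found by the LTP segmenter so whole-word masking can treat them as
    # one unit, e.g.
    #
    #     add_sub_symbol(["中", "国", "人"], {"中国"})  # -> ["中", "##国", "人"]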
| 719 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__SCREAMING_SNAKE_CASE,  # the large literal above keeps its original placeholder name
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
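# Hedged usage note (ours): these tests are meant to be run with pytest, e.g.
#     pytest tests/models/bert_generation/test_tokenization_bert_generation.py -k full_tokenizer
# and the @slow-marked cases additionally require RUN_SLOW=1 plus network access.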