import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the extra channel is dropped by the RGB conversion)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
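# --- Hedged note (added for illustration; not part of the original test file) ---
# The expected_hidden_dim used above is patch_height * patch_width * channels + 2:
# each 16x16 RGB patch is flattened to 768 values, and 2 extra slots carry the
# patch's (row, col) coordinates.
assert 16 * 16 * 3 + 2 == 770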
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        expected_encoding = UpperCAmelCase_  # the dict literal assigned above, under its intended name
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
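# --- Hedged note (added for illustration; not part of the original test file) ---
# BigBird wraps every sequence in [CLS] ... [SEP], whose ids in this vocabulary
# are 65 and 66 -- which is why each expected encoding in the slow tests above
# starts with 65 and ends with 66.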
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
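# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# make_atom14_masks relies on one core trick: indexing a per-residue-type lookup
# table of shape [21, N] with the per-residue `aatype` tensor, which gathers a
# [num_res, N] tensor. The same pattern in miniature, with made-up numbers:
#
#   import torch
#   table = torch.tensor([[10, 11], [20, 21], [30, 31]])  # [num_restypes, num_atoms]
#   aatype = torch.tensor([2, 0, 1])                      # one restype id per residue
#   per_residue = table[aatype]                           # [[30, 31], [10, 11], [20, 21]]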
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}

COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decode `ciphertext` with a repeating `key`; return None if any char is invalid."""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-letter lowercase key; keep decodings made only of valid chars."""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        decoded = try_key(ciphertext, key)
        if decoded is not None:
            possibles.append(decoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidate plaintexts that contain `common_word`."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
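# --- Hedged usage sketch (added for illustration; not part of the test file) ---
# The pipelines exercised above reduce to a three-line usage pattern:
#
#   generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#   out = generator("Hello I believe in", do_sample=False, max_new_tokens=5)
#   print(out[0]["generated_text"])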
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
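# --- Hedged usage sketch (added for illustration; not part of the conftest) ---
# A test can request the fixture to get an on-disk loading-script directory:
#
#   def test_load_dummy_dataset(dataset_loading_script_dir):  # hypothetical test
#       import datasets
#       ds = datasets.load_dataset(dataset_loading_script_dir)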
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks board[row][column]."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking whenever no column is safe."""
    if row >= len(board):
        # store a copy: `board` itself keeps being mutated during backtracking
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with Q for a queen and . for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # also accept the historical misspelling as a kwarg, for backwards compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import RegNetForImageClassification, RegNetModel
    from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
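# --- Hedged note (added for illustration; not part of the test file) ---
# test_hidden_states_output expects num_stages + 1 hidden states because the
# embedding output is prepended to the per-stage feature maps: with
# hidden_sizes=[10, 20, 30, 40] there are 4 stages, hence 5 hidden states.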
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when none is passed in
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
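# --- Hedged usage sketch (added for illustration; not part of the test file) ---
# The from_config round trip in test_switch works because these schedulers share
# a compatible config schema:
#
#   scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
#   scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
#   scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)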
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCamelCase_ = "\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
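# Illustrative sketch (not part of the metric implementation): `corpus_gleu` aggregates
# n-gram match counts over the whole corpus; nltk's per-sentence variant can be applied
# directly to the token lists shown in the docstring examples above:
#
#     from nltk.translate.gleu_score import sentence_gleu
#     score = sentence_gleu([ref1a], hyp1, min_len=1, max_len=4)  # min(n-gram precision, recall)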
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
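# Illustrative sketch of the mechanism exercised by the tests below (not the accelerate
# source): `add_hook_to_module` keeps the original forward as `module._old_forward` and
# swaps in a wrapper that routes inputs through `hook.pre_forward` and the result
# through `hook.post_forward`, roughly:
#
#     old_forward = module.forward  # saved as module._old_forward
#
#     def new_forward(*args, **kwargs):
#         args, kwargs = hook.pre_forward(module, *args, **kwargs)
#         output = old_forward(*args, **kwargs)
#         return hook.post_forward(module, output)
#
#     module.forward = new_forward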
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))

        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))

        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))

        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate a postfix (reverse Polish notation) expression given as a list of tokens.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncating toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the tests below."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
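# Hypothetical end-to-end usage sketch (the checkpoint name is an assumption, not taken
# from this test file): a single processor call yields text, Q-Former and image tensors:
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="Describe the image", return_tensors="pt")
#     # -> input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values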
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
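# Minimal usage sketch (illustrative only, assuming the class names above):
#
#     config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384])
#     onnx_config = LevitOnnxConfig(config)
#     print(onnx_config.inputs)               # OrderedDict([("pixel_values", {0: "batch", ...})])
#     print(onnx_config.atol_for_validation)  # 1e-4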
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launch a training function from a notebook, using several processes if the
    environment provides multiple devices (TPU cores or GPUs).
    """
    # Are we in a Google Colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
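# Minimal usage sketch (illustrative): in a notebook, define a training function and
# hand it to the launcher; `num_processes` must match the hardware (8 for a Colab/Kaggle
# TPU, the number of GPUs on a multi-GPU machine):
#
#     def training_function():
#         ...  # build the Accelerator and the training loop inside this function
#
#     notebook_launcher(training_function, args=(), num_processes=2, mixed_precision="fp16")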
def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function on several CPU processes, for debugging distributed code."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
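# Illustrative usage: `debug_launcher` spawns CPU-only processes with a shared rendezvous
# file, which is handy for unit-testing distributed code paths on machines without GPUs:
#
#     debug_launcher(training_function, args=(), num_processes=2)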
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )

        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new gpu-fresh process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which belong to `nn.Embedding`, which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # to 2 bytes, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
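        # (In exact numbers, assuming the 25M figure above: 25e6 * 8 / 2**20 ~ 191MB vs
        # 25e6 * 2 / 2**20 ~ 48MB, i.e. ~143MB saved; the round figures are close enough
        # for the threshold check below.)
        #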
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
        # that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            loss_orig , loss_bnb , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
    def run_trainer( self :Any , max_len :int , model_name :str , num_train_epochs :int , learning_rate :float = 3E-3 , optim :str = "adafactor" , distributed :bool = False , extra_args_str :str = None , eval_steps :int = 0 , predict_with_generate :bool = True , do_train :bool = True , do_eval :bool = True , do_predict :bool = True , n_gpus_to_use :int = None , ) -> str:
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
        args_eval = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps)}\n ".split()
        args_predict = '''
            --do_predict
        '''.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env())
else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs):
main()
return output_dir
| 344 | 0 |
speed_chart = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
speed_chart_inverse = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def A ( speed: float , unit_from: str , unit_to: str ) -> float:
    '''simple docstring'''
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart)}"
        )
        raise ValueError(msg)
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
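    # Quick sanity checks (expected values follow from the chart constants above,
    # which the converter rounds to 3 decimal places):
    assert A(100 , '''km/h''' , '''m/s''') == 27.778
    assert A(100 , '''km/h''' , '''mph''') == 62.137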
| 367 |
import functools
def A ( worda: str , wordb: str ) -> int:
    '''simple docstring'''
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int , indexb: int) -> int:
        # if the first word's index overflows - delete all remaining chars of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete all remaining chars of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb) , 1 + min_distance(indexa , indexb + 1) , diff + min_distance(indexa + 1 , indexb + 1) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
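    # Examples, assuming the cached recursion above: "kitten" -> "sitting" takes 3 edits
    # (two substitutions and one insertion); an empty word needs one insertion per char.
    assert A('''kitten''' , '''sitting''') == 3
    assert A('''''' , '''abc''') == 3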
| 344 | 0 |
import string
import numpy
def greatest_common_divisor ( a: int , b: int ) -> int:
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a , a)
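# For example, greatest_common_divisor(33 , 36) == 3, so a key whose determinant is
# congruent to 33 (mod 36) would be rejected by check_determinant() below.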
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__( self , encrypt_key :numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self , letter :str) -> int:
        return self.key_string.index(letter)

    def replace_digits( self , num :int) -> str:
        return self.key_string[round(num)]

    def check_determinant( self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det , len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text( self , text :str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt( self , text :str) -> str:
        text = self.process_text(text.upper())
        encrypted = ''''''
        for i in range(0 , len(text) - self.break_key + 1 , self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = ''''''.join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key( self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt( self , text :str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ''''''
        for i in range(0 , len(text) - self.break_key + 1 , self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = ''''''.join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def A ( ) -> None:
    '''simple docstring'''
    n = int(input('''Enter the order of the encryption key: ''' ) )
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    option = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
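    # Non-interactive round-trip sketch (key values are illustrative only; det([[2, 5], [1, 6]]) = 7,
    # which is co-prime with 36, so it is a valid key):
    #   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    #   hc.decrypt(hc.encrypt('''testing hill cipher'''))  # round-trips to '''TESTINGHILLCIPHERR''' (padded)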
main()
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class XLNetTokenizer( PreTrainedTokenizer ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any ="left"
def __init__( self :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Tuple="<s>" , _lowercase :Any="</s>" , _lowercase :Dict="<unk>" , _lowercase :str="<sep>" , _lowercase :Tuple="<pad>" , _lowercase :Any="<cls>" , _lowercase :List[str]="<mask>" , _lowercase :Union[str, Any]=["<eop>", "<eod>"] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Union[str, Any] , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = 3
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :int) -> List[Any]:
return len(self.sp_model)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
    def preprocess_text( self :List[str] , inputs :Tuple) -> str:
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
return outputs
    def _tokenize( self :str , text :str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text , out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                    cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
def __a ( self :Optional[Any] , _lowercase :Union[str, Any]) -> Tuple:
return self.sp_model.PieceToId(_lowercase)
def __a ( self :Optional[int] , _lowercase :Optional[Any]) -> List[str]:
return self.sp_model.IdToPiece(_lowercase)
    def convert_tokens_to_string( self :List[Any] , tokens :Optional[Any]) -> str:
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE , ''' ''').strip()
        return out_string
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :bool = False , _lowercase :bool = None , _lowercase :bool = True , **_lowercase :Tuple , ) -> str:
UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _lowercase)
UpperCAmelCase_ = self.convert_ids_to_tokens(_lowercase , skip_special_tokens=_lowercase)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
UpperCAmelCase_ = []
sub_texts.append(_lowercase)
else:
current_sub_text.append(_lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase_ = ''''''.join(_lowercase)
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(_lowercase)
return clean_text
else:
return text
def __a ( self :str , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self :Dict , _lowercase :List[int] , _lowercase :Optional[List[int]] = None , _lowercase :bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is not None:
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1, 1]
return ([0] * len(_lowercase)) + [1, 1]
def __a ( self :Optional[int] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __a ( self :str , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
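# Usage sketch (hedged - requires a real SentencePiece model file; the path below is hypothetical):
#   tok = XLNetTokenizer('''spiece.model''')
#   tok.tokenize('''Hello, world!''')   # -> SentencePiece sub-tokens; inputs are left-padded downstream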
| 344 | 0 |
import os
def A ( __UpperCAmelCase = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
UpperCAmelCase_ = [
[int(_snake_case ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
UpperCAmelCase_ = len(_snake_case )
UpperCAmelCase_ = len(matrix[0] )
UpperCAmelCase_ = [[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
UpperCAmelCase_ = matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
UpperCAmelCase_ = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
UpperCAmelCase_ = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
UpperCAmelCase_ = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"{solution() = }")
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class MaskFormerSwinConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , out_features=None , out_indices=None , **kwargs , ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
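# Note on the derived channel dimension above: with the defaults embed_dim=96 and
# depths=[2, 2, 6, 2] (i.e. 4 stages), hidden_size = int(96 * 2 ** (4 - 1)) = 768.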
| 344 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ :
def __init__( self :int , _lowercase :Union[str, Any] , _lowercase :Any=13 , _lowercase :Optional[int]=7 , _lowercase :Any=True , _lowercase :Union[str, Any]=True , _lowercase :int=True , _lowercase :Dict=True , _lowercase :str=99 , _lowercase :List[Any]=16 , _lowercase :int=36 , _lowercase :Tuple=6 , _lowercase :List[str]=6 , _lowercase :Dict=6 , _lowercase :Dict=37 , _lowercase :Dict="gelu" , _lowercase :str=0.1 , _lowercase :Optional[Any]=0.1 , _lowercase :int=512 , _lowercase :Optional[Any]=16 , _lowercase :List[Any]=2 , _lowercase :Optional[Any]=0.02 , _lowercase :List[str]=3 , _lowercase :Tuple=4 , _lowercase :Optional[int]=None , ) -> Optional[int]:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_hidden_groups
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def __a ( self :str) -> Tuple:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self :Any) -> List[str]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self :Union[str, Any] , _lowercase :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Optional[Any] , _lowercase :Optional[Any] , _lowercase :Optional[Any] , _lowercase :List[Any] , _lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = AlbertModel(config=_a)
model.to(_a)
model.eval()
UpperCAmelCase_ = model(_a , attention_mask=_a , token_type_ids=_a)
UpperCAmelCase_ = model(_a , token_type_ids=_a)
UpperCAmelCase_ = model(_a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def __a ( self :Union[str, Any] , _lowercase :Optional[Any] , _lowercase :Dict , _lowercase :List[Any] , _lowercase :Optional[int] , _lowercase :Optional[Any] , _lowercase :List[Any] , _lowercase :Optional[int]) -> Dict:
UpperCAmelCase_ = AlbertForPreTraining(config=_a)
model.to(_a)
model.eval()
UpperCAmelCase_ = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , sentence_order_label=_a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
def __a ( self :List[str] , _lowercase :str , _lowercase :Dict , _lowercase :Union[str, Any] , _lowercase :List[str] , _lowercase :List[str] , _lowercase :Any , _lowercase :Optional[int]) -> List[Any]:
UpperCAmelCase_ = AlbertForMaskedLM(config=_a)
model.to(_a)
model.eval()
UpperCAmelCase_ = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __a ( self :Any , _lowercase :List[Any] , _lowercase :Union[str, Any] , _lowercase :Any , _lowercase :Optional[int] , _lowercase :Optional[int] , _lowercase :List[str] , _lowercase :Tuple) -> int:
UpperCAmelCase_ = AlbertForQuestionAnswering(config=_a)
model.to(_a)
model.eval()
UpperCAmelCase_ = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __a ( self :str , _lowercase :List[str] , _lowercase :Tuple , _lowercase :Dict , _lowercase :str , _lowercase :Tuple , _lowercase :Dict , _lowercase :str) -> Optional[int]:
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = AlbertForSequenceClassification(_a)
model.to(_a)
model.eval()
UpperCAmelCase_ = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __a ( self :Tuple , _lowercase :Tuple , _lowercase :Tuple , _lowercase :Any , _lowercase :List[str] , _lowercase :List[str] , _lowercase :List[str] , _lowercase :Any) -> Dict:
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = AlbertForTokenClassification(config=_a)
model.to(_a)
model.eval()
UpperCAmelCase_ = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __a ( self :Tuple , _lowercase :int , _lowercase :Tuple , _lowercase :Tuple , _lowercase :Dict , _lowercase :Optional[int] , _lowercase :Optional[Any] , _lowercase :Tuple) -> Any:
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = AlbertForMultipleChoice(config=_a)
model.to(_a)
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __a ( self :Union[str, Any]) -> Optional[Any]:
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
UpperCAmelCase_
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a_ ( _a , _a , unittest.TestCase ):
UpperCamelCase__ : Optional[int] =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ : int =(
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Optional[Any] =True
def __a ( self :List[Any] , _lowercase :Dict , _lowercase :Union[str, Any] , _lowercase :int=False) -> Union[str, Any]:
UpperCAmelCase_ = super()._prepare_for_class(_a , _a , return_labels=_a)
if return_labels:
if model_class in get_values(_a):
UpperCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a)
UpperCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a)
return inputs_dict
def __a ( self :int) -> Dict:
UpperCAmelCase_ = AlbertModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_a , hidden_size=37)
def __a ( self :Tuple) -> Optional[int]:
self.config_tester.run_common_tests()
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a)
def __a ( self :Optional[Any]) -> Dict:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a)
def __a ( self :Optional[Any]) -> Union[str, Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a)
def __a ( self :Tuple) -> str:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a)
def __a ( self :str) -> Optional[Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a)
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a)
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_a)
@slow
def __a ( self :Optional[int]) -> List[str]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AlbertModel.from_pretrained(_a)
self.assertIsNotNone(_a)
@require_torch
class a_ ( unittest.TestCase ):
@slow
def __a ( self :str) -> str:
UpperCAmelCase_ = AlbertModel.from_pretrained('''albert-base-v2''')
UpperCAmelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
UpperCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
UpperCAmelCase_ = model(_a , attention_mask=_a)[0]
UpperCAmelCase_ = torch.Size((1, 11, 768))
self.assertEqual(output.shape , _a)
UpperCAmelCase_ = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4))
| 370 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def A ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
return TrainCommand(__UpperCAmelCase )
class TrainCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser :ArgumentParser) -> None:
        train_parser = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
train_parser.add_argument(
'''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''')
train_parser.add_argument(
'''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''')
train_parser.add_argument(
'''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''')
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''')
train_parser.add_argument(
'''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to saved the trained model.''')
train_parser.add_argument(
'''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''')
train_parser.add_argument(
'''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''')
train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''')
train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''')
train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''')
        train_parser.set_defaults(func=A)  # A is the factory function defined at module level above
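        # Example invocation (hypothetical paths; the flags are the ones registered above):
        #   transformers-cli train --train_data ./train.csv --column_label 0 --column_text 1 \
        #       --model bert-base-uncased --output ./trained_model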
    def __init__( self :Union[str, Any] , args :Namespace) -> None:
        self.logger = logging.get_logger('''transformers-cli/training''')
        self.framework = '''tf''' if is_tf_available() else '''torch'''
        os.makedirs(args.output , exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
| 344 | 0 |
class Node:
    """simple docstring"""

    def __init__( self , data :int , previous=None , next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__( self) -> str:
        return f"{self.data}"

    def get_data( self) -> int:
        return self.data

    def get_next( self):
        return self.next

    def get_previous( self):
        return self.previous
class LinkedListIterator:
    """simple docstring"""

    def __init__( self , head) -> None:
        self.current = head

    def __iter__( self):
        return self

    def __next__( self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    """simple docstring"""

    def __init__( self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__( self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__( self , value :int) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__( self):
        return LinkedListIterator(self.head)

    def get_head_data( self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data( self):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head( self , node :Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node)

    def set_tail( self , node :Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail , node)

    def insert( self , value :int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)
    def insert_before_node( self , node :Node , node_to_insert :Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node( self , node :Node , node_to_insert :Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position( self , position :int , value :int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node)
    def get_node( self , item :int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('''Node not found''')

    def delete_value( self , value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers( node :Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty( self) -> bool:
        return self.head is None
def A ( ) -> None:
'''simple docstring'''
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
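    # Small demonstration of the list (assumes only the classes defined above):
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    assert str(linked_list) == '''1 2 3'''
    linked_list.delete_value(2)
    assert 2 not in linked_list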
| 371 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
    def prepare_image_processor_dict( self :str) -> dict:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values( self , image_inputs , batched=False):
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
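    # Worked example, assuming size={"shortest_edge": 288} and size_divisor=32: a 600x400 PIL
    # image gets scale = 288/400 = 0.72, i.e. (h, w) = (288, 432); 432 is under the
    # int(1333 / 800 * 288) = 479 cap, and flooring to multiples of 32 yields (288, 416).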
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class a_ ( __lowerCamelCase , unittest.TestCase ):
UpperCamelCase__ : str =WavaVecaPhonemeCTCTokenizer
UpperCamelCase__ : str =False
def __a ( self :List[str]) -> int:
super().setUp()
UpperCAmelCase_ = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''')
UpperCAmelCase_ = dict(zip(__lowercase , range(len(__lowercase))))
UpperCAmelCase_ = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(__lowercase) + '''\n''')
def __a ( self :List[str] , _lowercase :Dict , _lowercase :List[Any]=False , _lowercase :Union[str, Any]=20 , _lowercase :Union[str, Any]=5) -> Any:
UpperCAmelCase_ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase)) for i in range(len(__lowercase))]
UpperCAmelCase_ = list(filter(lambda _lowercase: [t[0]] == tokenizer.encode(t[1] , do_phonemize=__lowercase) , __lowercase))
if max_length is not None and len(__lowercase) > max_length:
UpperCAmelCase_ = toks[:max_length]
if min_length is not None and len(__lowercase) < min_length and len(__lowercase) > 0:
while len(__lowercase) < min_length:
UpperCAmelCase_ = toks + toks
# toks_str = [t[1] for t in toks]
UpperCAmelCase_ = [t[0] for t in toks]
# Ensure consistency
UpperCAmelCase_ = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase)
if " " not in output_txt and len(__lowercase) > 1:
UpperCAmelCase_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase)
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase)
)
if with_prefix_space:
UpperCAmelCase_ = ''' ''' + output_txt
UpperCAmelCase_ = tokenizer.encode(__lowercase , add_special_tokens=__lowercase)
return output_txt, output_ids
    def get_tokenizer( self , **kwargs) -> WavaVecaPhonemeCTCTokenizer:
        kwargs.update(self.special_tokens_map)
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs)
def __a ( self :Any) -> Optional[int]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
# check adding a single token
tokenizer.add_tokens('''xxx''')
UpperCAmelCase_ = tokenizer('''m xxx ɪ''' , do_phonemize=__lowercase).input_ids
self.assertEqual(__lowercase , [13, 392, 17]) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''])
UpperCAmelCase_ = tokenizer('''m aaa ɪ ccc''' , do_phonemize=__lowercase).input_ids
self.assertEqual(__lowercase , [13, 393, 17, 395]) # aaa and ccc should be after xxx and 2 after aaa
UpperCAmelCase_ = tokenizer('''maɪ c''' , do_phonemize=__lowercase).input_ids
self.assertEqual(__lowercase , [3, 200]) # mai should be <unk> (=3)
def __a ( self :Dict) -> int:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCAmelCase_ = '''Hello how are you'''
UpperCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''')
self.assertEqual(__lowercase , '''h ə l oʊ h aʊ ɑːɹ j uː''')
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCAmelCase_ = '''Hello how are you'''
UpperCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''')
self.assertEqual(tokenizer(__lowercase).input_ids , tokenizer(__lowercase , do_phonemize=__lowercase).input_ids)
def __a ( self :Dict) -> Optional[int]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCAmelCase_ = '''Hello how are you'''
UpperCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''')
UpperCAmelCase_ = tokenizer.decode(tokenizer(__lowercase).input_ids)
self.assertEqual(__lowercase , __lowercase)
def __a ( self :List[Any]) -> List[str]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
UpperCAmelCase_ = tokenizer.decode(sample_ids[0])
UpperCAmelCase_ = tokenizer.batch_decode(__lowercase)
self.assertEqual(__lowercase , batch_tokens[0])
self.assertEqual(__lowercase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''])
def __a ( self :List[str]) -> Optional[int]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
UpperCAmelCase_ = '''Hello how are you'''
UpperCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''')
self.assertEqual(__lowercase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''')
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
UpperCAmelCase_ = '''Hello how are you'''
UpperCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''')
self.assertEqual(tokenizer(__lowercase).input_ids , tokenizer(__lowercase , do_phonemize=__lowercase).input_ids)
def __a ( self :List[str]) -> List[str]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
# fmt: off
UpperCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
UpperCAmelCase_ = tokenizer.decode(sample_ids[0])
UpperCAmelCase_ = tokenizer.batch_decode(__lowercase)
self.assertEqual(__lowercase , batch_tokens[0])
self.assertEqual(__lowercase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''])
# decode with no word_del_token filter
UpperCAmelCase_ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(__lowercase , filter_word_delimiter_token=__lowercase)
self.assertEqual(__lowercase , batch_tokens[0])
self.assertEqual(__lowercase , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
UpperCAmelCase_ = '''Hello how are you'''
UpperCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''')
UpperCAmelCase_ = tokenizer.decode(tokenizer(__lowercase).input_ids , filter_word_delimiter_token=__lowercase)
self.assertEqual(__lowercase , __lowercase)
def __a ( self :int) -> Optional[int]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
UpperCAmelCase_ = '''Hello how are you'''
UpperCAmelCase_ = tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''')
UpperCAmelCase_ = tokenizer.decode(tokenizer(__lowercase).input_ids , filter_word_delimiter_token=__lowercase)
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''')]).strip() , __lowercase)
def __a ( self :Union[str, Any]) -> List[str]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=__lowercase)
UpperCAmelCase_ = '''Hello how are you'''
UpperCAmelCase_ = tokenizer(__lowercase , phonemizer_lang='''en-us''').input_ids
UpperCAmelCase_ = tokenizer(__lowercase , phonemizer_lang='''fr-fr''').input_ids
self.assertNotEqual(__lowercase , __lowercase)
UpperCAmelCase_ = tokenizer.decode(__lowercase)
UpperCAmelCase_ = tokenizer.decode(__lowercase)
self.assertEqual(__lowercase , '''h ə l oʊ h aʊ ɑːɹ j uː''')
self.assertEqual(__lowercase , '''ɛ l o h aʊ a ʁ j u''')
def __a ( self :int) -> Any:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
UpperCAmelCase_ = '''Hello how Are you'''
UpperCAmelCase_ = '''hello how are you'''
UpperCAmelCase_ = tokenizer(__lowercase).input_ids
UpperCAmelCase_ = tokenizer(__lowercase).input_ids
self.assertEqual(__lowercase , __lowercase)
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
tokenizer.add_tokens(['''!''', '''?'''])
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''})
# fmt: off
UpperCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
UpperCAmelCase_ = tokenizer.batch_decode(__lowercase)
self.assertEqual(__lowercase , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''])
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
return retrieved_list
def __a ( self :Dict) -> int:
UpperCAmelCase_ = self.get_tokenizer(word_delimiter_token='''|''')
tokenizer.add_tokens('''|''')
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
UpperCAmelCase_ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
UpperCAmelCase_ = tokenizer.decode(__lowercase , output_char_offsets=__lowercase , filter_word_delimiter_token=__lowercase)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys()) , 2)
self.assertTrue('''text''' in outputs)
self.assertTrue('''char_offsets''' in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaPhonemeCTCTokenizerOutput))
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''')) , outputs.text)
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''') , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''])
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''') , [0, 1, 4, 7, 9, 11, 12, 15, 16])
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''') , [1, 4, 6, 9, 10, 12, 15, 16, 17])
def __a ( self :Optional[Any]) -> Dict:
UpperCAmelCase_ = self.get_tokenizer(word_delimiter_token='''|''')
        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, WavaVecaPhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], WavaVecaPhonemeCTCTokenizerOutput))
            # transform the list of per-sample outputs into one batched ModelOutput
            outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]})
            self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''])

            def recursive_check(nested_a, nested_b):
                if isinstance(nested_a, list):
                    [recursive_check(la, lb) for la, lb in zip(nested_a, nested_b)]
                self.assertEqual(nested_a, nested_b)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''])
# fmt: off
UpperCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
        # We assume that `decode` works as expected. All we check here is that
        # the output type is correct and that the output matches `decode`.
# char
UpperCAmelCase_ = tokenizer.batch_decode(__lowercase , output_char_offsets=__lowercase)
UpperCAmelCase_ = [tokenizer.decode(__lowercase , output_char_offsets=__lowercase) for ids in sample_ids]
check_list_tuples_equal(__lowercase , __lowercase)
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''')
def __a ( self :Union[str, Any]) -> str:
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''')
def __a ( self :Optional[Any]) -> Dict:
pass
    @unittest.skip('''encodes text to ids, but decodes ids to phonemes -> not possible to have internal consistency''')
def __a ( self :List[Any]) -> int:
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''')
def __a ( self :Optional[Any]) -> Any:
pass
def __a ( self :Tuple) -> List[Any]:
UpperCAmelCase_ = self.get_tokenizers(do_lower_case=__lowercase)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(__lowercase)
self.assertNotEqual(__lowercase , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase_ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
UpperCAmelCase_ = tokenizer.add_tokens(__lowercase)
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(__lowercase)
self.assertNotEqual(__lowercase , 0)
self.assertEqual(__lowercase , __lowercase)
self.assertEqual(__lowercase , len(__lowercase))
self.assertEqual(__lowercase , all_size + len(__lowercase))
UpperCAmelCase_ = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase)
self.assertGreaterEqual(len(__lowercase) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
UpperCAmelCase_ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
UpperCAmelCase_ = tokenizer.add_special_tokens(__lowercase)
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(__lowercase)
self.assertNotEqual(__lowercase , 0)
self.assertEqual(__lowercase , __lowercase)
self.assertEqual(__lowercase , len(__lowercase))
self.assertEqual(__lowercase , all_size_a + len(__lowercase))
UpperCAmelCase_ = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase)
self.assertGreaterEqual(len(__lowercase) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''')
def __a ( self :Optional[Any]) -> Union[str, Any]:
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''')
def __a ( self :Tuple) -> Optional[int]:
pass
def __a ( self :Union[str, Any]) -> Union[str, Any]:
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string,
        # which is not the case for Wav2Vec2PhonemeCTCTokenizer.
UpperCAmelCase_ = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
UpperCAmelCase_ = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(__lowercase)
self.assertIsInstance(output['''text'''] , __lowercase)
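
# Side note (not part of the test class above): in practice these char offsets are
# turned into timestamps by multiplying frame indices by the model's frame stride.
# A minimal sketch -- the 0.02 s stride is an assumption matching the usual 20 ms
# Wav2Vec2 feature rate, not something these tests pin down:
def char_offsets_to_seconds(char_offsets, time_offset_seconds=0.02):
    return [
        {
            "char": d["char"],
            "start": round(d["start_offset"] * time_offset_seconds, 3),
            "end": round(d["end_offset"] * time_offset_seconds, 3),
        }
        for d in char_offsets
    ]
# e.g. {"char": "k", "start_offset": 0, "end_offset": 1} maps to 0.0 s - 0.02 s.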
| 350 |
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72: count reduced proper fractions d/n with n <= limit,
    i.e. sum(phi(n) for 2 <= n <= limit), computed with a totient sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so phi[i] was never reduced
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
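# Sanity check on a small input: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) == 21,
# the number of reduced proper fractions d/n with 1 <= d < n <= 8.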
if __name__ == "__main__":
print(solution())
| 344 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCAmelCase_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
if not batched:
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * h / w)
UpperCAmelCase_ = self.size['shortest_edge']
elif w > h:
UpperCAmelCase_ = self.size['shortest_edge']
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * w / h)
else:
UpperCAmelCase_ = self.size['shortest_edge']
UpperCAmelCase_ = self.size['shortest_edge']
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
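
# For readability, the aspect-ratio rule exercised by get_expected_values above can be
# written standalone like this (a sketch, not used by the tests; it assumes integer
# sizes and ignores the longest-edge cap):
def shortest_edge_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge
# e.g. shortest_edge_resize(40, 30) == (24, 18): the short side lands on shortest_edge.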
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
UpperCamelCase__ : Optional[Any] =ConditionalDetrImageProcessor if is_vision_available() else None
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = ConditionalDetrImageProcessingTester(self)
@property
def __a ( self :Union[str, Any]) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :str) -> Dict:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
def __a ( self :List[Any]) -> int:
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333})
self.assertEqual(image_processor.do_pad , _lowercase)
UpperCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowercase)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
self.assertEqual(image_processor.do_pad , _lowercase)
def __a ( self :Union[str, Any]) -> Optional[int]:
pass
def __a ( self :Tuple) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :int) -> int:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Optional[Any]) -> Tuple:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __a ( self :Tuple) -> int:
# prepare image and target
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
UpperCAmelCase_ = json.loads(f.read())
UpperCAmelCase_ = {'image_id': 39769, 'annotations': target}
# encode them
UpperCAmelCase_ = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''')
UpperCAmelCase_ = image_processing(images=_lowercase , annotations=_lowercase , return_tensors='''pt''')
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase)
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4))
# verify area
UpperCAmelCase_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase))
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase)
UpperCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3))
# verify image_id
UpperCAmelCase_ = torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase))
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase))
# verify class_labels
UpperCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase))
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase))
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase))
@slow
def __a ( self :int) -> Union[str, Any]:
# prepare image, target and masks_path
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
UpperCAmelCase_ = json.loads(f.read())
UpperCAmelCase_ = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
UpperCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
UpperCAmelCase_ = ConditionalDetrImageProcessor(format='''coco_panoptic''')
UpperCAmelCase_ = image_processing(images=_lowercase , annotations=_lowercase , masks_path=_lowercase , return_tensors='''pt''')
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase)
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4))
# verify area
UpperCAmelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase))
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase)
UpperCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3))
# verify image_id
UpperCAmelCase_ = torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase))
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase))
# verify class_labels
UpperCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase))
# verify masks
UpperCAmelCase_ = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowercase)
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase))
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase))
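
# For context: the `target` dicts loaded from the fixture files above follow the COCO
# detection / panoptic formats. A minimal detection-style annotation (illustrative
# values, not the fixture's contents) looks like:
#   {"image_id": 39769,
#    "annotations": [{"bbox": [x_min, y_min, width, height], "category_id": 75,
#                     "area": 5887.96, "iscrowd": 0}]}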
| 351 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
    def get_scheduler_config(self, **_lowercase):
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
    def check_over_configs(self, time_step=0, **_lowercase):
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
    def check_over_forward(self, time_step=0, **_lowercase):
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, **_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
        # earlier versions of set_timesteps() raised an error when indexing alphas with an inference-step count that is a power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
            # before the power-of-3 fix this errored on the first step, so running two steps is enough
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
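
# Shape of a full denoising pass with this scheduler, mirroring the full_loop helper
# above (comment-only sketch; `model` stands in for any noise-predicting UNet):
#   scheduler.set_timesteps(num_inference_steps)
#   for t in scheduler.prk_timesteps:   # Runge-Kutta warm-up steps
#       sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
#   for t in scheduler.plms_timesteps:  # linear multistep steps
#       sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample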
| 344 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
UpperCamelCase_ = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = """cpu"""
UpperCamelCase_ = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
UpperCamelCase_ = """path-to-your-trained-model"""
UpperCamelCase_ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
UpperCamelCase_ = pipe.to(device)
# to channels last
UpperCamelCase_ = pipe.unet.to(memory_format=torch.channels_last)
UpperCamelCase_ = pipe.vae.to(memory_format=torch.channels_last)
UpperCamelCase_ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
UpperCamelCase_ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
UpperCamelCase_ = torch.randn(2, 4, 64, 64)
UpperCamelCase_ = torch.rand(1) * 999
UpperCamelCase_ = torch.randn(2, 77, 768)
UpperCamelCase_ = (sample, timestep, encoder_hidden_status)
try:
UpperCamelCase_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
UpperCamelCase_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
UpperCamelCase_ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
UpperCamelCase_ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
UpperCamelCase_ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
UpperCamelCase_ = 666
UpperCamelCase_ = torch.Generator(device).manual_seed(seed)
UpperCamelCase_ = {"""generator""": generator}
if args.steps is not None:
UpperCamelCase_ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
UpperCamelCase_ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
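
# One way to sanity-check the IPEX speedup (assumption: build a second, unoptimized
# pipeline for comparison; absolute latencies depend heavily on hardware):
#   import time
#   start = time.time()
#   with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
#       pipe(prompt, **generate_kwargs)
#   print(f"latency: {time.time() - start:.2f} s")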
| 352 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class Audio:
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
elif isinstance(_lowercase , _lowercase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
                    # converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
                    # if we already have the PCM bytes, we don't need to read the file again (just use them!)
UpperCAmelCase_ = np.frombuffer(value['''bytes'''] , dtype=np.intaa).astype(np.floataa) / 32767
else:
UpperCAmelCase_ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.floataa) / 32767
UpperCAmelCase_ = BytesIO(bytes())
sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
UpperCAmelCase_ , UpperCAmelCase_ = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
UpperCAmelCase_ = xsplitext(_lowercase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(_lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(_lowercase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate)
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
UpperCAmelCase_ = pa.array([Audio().encode_example(_lowercase) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(_lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
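
# Typical usage of the feature above inside a `datasets` schema (sketch; assumes the
# public `datasets` API and a local "clip.wav" file):
#   from datasets import Dataset, Features
#   ds = Dataset.from_dict({"audio": ["clip.wav"]})
#   ds = ds.cast(Features({"audio": Audio(sampling_rate=16_000)}))
#   ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}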
| 344 | 0 |
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72, prime-sieve variant: compute phi(n) via the product formula
    phi(n) = n * prod(1 - 1/p) over primes p dividing n, then sum phi(2..limit)."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
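# Sanity check: solution(8) == 21, since phi(2..8) = 1, 2, 2, 4, 2, 6, 4.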
if __name__ == "__main__":
print(f"{solution() = }") | 353 |
from ..utils import DummyObject, requires_backends
class a_(metaclass=DummyObject):
UpperCamelCase__ : Any =["torch", "scipy"]
def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]:
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
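
# Note: this stub never does real work. Constructing it or calling either classmethod
# immediately raises an ImportError via requires_backends, telling the user to install
# torch and scipy.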
| 344 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCamelCase_ = "base_with_context"
def load_notes_encoder(weights, model):
    """Copy the Flax note-encoder weights into the torch notes encoder and return it."""
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCAmelCase_ = weights[f"layers_{lyr_num}"]
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
UpperCAmelCase_ = ly_weight['''attention''']
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_continuous_encoder(weights, model):
    """Copy the Flax continuous-context encoder weights into the torch encoder and return it."""
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCAmelCase_ = weights[f"layers_{lyr_num}"]
UpperCAmelCase_ = ly_weight['''attention''']
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder(weights, model):
    """Copy the Flax decoder weights into the torch FiLM decoder and return it."""
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
UpperCAmelCase_ = weights[f"layers_{lyr_num}"]
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
UpperCAmelCase_ = ly_weight['''self_attention''']
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCAmelCase_ = ly_weight['''MultiHeadDotProductAttention_0''']
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCAmelCase_ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
UpperCAmelCase_ = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main(args):
    """Assemble the diffusers SpectrogramDiffusionPipeline from the original checkpoint."""
UpperCAmelCase_ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
UpperCAmelCase_ = jnp.tree_util.tree_map(onp.array , _lowerCAmelCase )
UpperCAmelCase_ = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
UpperCAmelCase_ = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
UpperCAmelCase_ = inference.parse_training_gin_file(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ = inference.InferenceModel(args.checkpoint_path , _lowerCAmelCase )
UpperCAmelCase_ = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
UpperCAmelCase_ = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
UpperCAmelCase_ = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
UpperCAmelCase_ = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
UpperCAmelCase_ = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , _lowerCAmelCase )
UpperCAmelCase_ = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , _lowerCAmelCase )
UpperCAmelCase_ = load_decoder(ta_checkpoint['''target''']['''decoder'''] , _lowerCAmelCase )
UpperCAmelCase_ = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
UpperCAmelCase_ = SpectrogramDiffusionPipeline(
notes_encoder=_lowerCAmelCase , continuous_encoder=_lowerCAmelCase , decoder=_lowerCAmelCase , scheduler=_lowerCAmelCase , melgan=_lowerCAmelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f"{MODEL}/checkpoint_500000",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
UpperCamelCase_ = parser.parse_args()
main(args)
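
# Example invocation (script name and paths are illustrative, not fixed by this file):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./converted_pipeline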
| 354 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks(protein) -> Dict[str, torch.Tensor]:
    """Construct atom14<->atom37 index mappings and per-residue atom existence masks."""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for rt in rc.restypes:
UpperCAmelCase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCAmelCase_ = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.floataa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCAmelCase_ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCAmelCase_ = rc.restype_atoa[restype_letter]
UpperCAmelCase_ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCAmelCase_ = rc.atom_order[atom_name]
UpperCAmelCase_ = 1
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
return protein
def make_atomaa_masks_np(batch) -> Dict[str, np.ndarray]:
    """NumPy wrapper: lift arrays to tensors, run make_atomaa_masks, convert back."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch['''aatype'''].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
return out
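
# Shape summary for the mappings built above (standard atom14/atom37 conventions are
# assumed): the index tensors have shape [*, num_res, 14] and [*, num_res, 37]
# respectively, and the existence masks are float tensors of the matching shapes,
# with zeros for atoms a residue type does not have.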
| 344 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = 'The Nymphenburg Palace is a beautiful palace in Munich!'
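# High-level flow of the conversion below: rebuild the original Gluon Bort encoder,
# load its parameters, construct an equivalent BertConfig + BertForMaskedLM, then copy
# every Gluon array into the matching PyTorch parameter (the comment table inside the
# function documents the name mapping).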
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
    """Convert the original Gluon Bort checkpoint into a Hugging Face BertForMaskedLM."""
UpperCAmelCase_ = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
UpperCAmelCase_ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
UpperCAmelCase_ = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=_A , output_all_encodings=_A , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , _A ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
UpperCAmelCase_ = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
UpperCAmelCase_ = os.path.join(get_home_dir() , '''models''' )
UpperCAmelCase_ = _load_vocab(_A , _A , _A , cls=_A )
UpperCAmelCase_ = nlp.model.BERTModel(
_A , len(_A ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=_A , use_token_type_embed=_A , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=_A , use_decoder=_A , )
original_bort.load_parameters(_A , cast_dtype=_A , ignore_extra=_A )
UpperCAmelCase_ = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase_ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
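    # Example invocation (the paths are illustrative placeholders, not files shipped here):
    #   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
    #       --bort_checkpoint_path ./bort.params \
    #       --pytorch_dump_folder_path ./bort-pytorch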
| 355 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]
    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
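# Minimal usage sketch (illustrative: `waveform` stands for a 16 kHz mono float
# array you supply yourself; the checkpoint name above is the only real value):
#
#   tool = SpeechToTextTool()
#   tool.setup()
#   transcript = tool(waveform)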
| 344 | 0 |
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in the interval [a, b] by repeatedly halving the bracket."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precision of 10^-7 is reached
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
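# Tolerance sanity check: with |b - a| = 999 and the 1e-7 stopping threshold,
# the loop halves the bracket about log2(999 / 1e-7) ≈ 33 times before returning.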
| 356 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
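# Behaviour sketch of the lazy pattern (illustrative): importing this package is
# cheap because no modeling code runs at import time; the first attribute access,
# e.g. `from transformers.models.opt import OPTModel`, makes _LazyModule import
# `modeling_opt` on demand and cache the resolved attribute.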
| 344 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string_value):
    try:
        int(string_value)
        return True
    except ValueError:
        return False


def can_convert_to_float(string_value):
    try:
        float(string_value)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
with open(self.args.csv_file , newline='''''') as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''')
ax.set_yscale('''log''')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, np.int32)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file)
else:
plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
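# Expected CSV layout (the column names are the ones read in Plot.__init__; the
# numbers below are made up for illustration):
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,1423
#   bert-base-uncased,8,512,4102
#
# Example run: python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png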
| 357 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
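# The integration test above pins a fixed tokenizer revision so the expected ids
# stay stable; running it needs the slow-test opt-in, e.g. (paths illustrative):
#   RUN_SLOW=1 pytest -k tokenizer_integration test_tokenization_big_bird.py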
| 344 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"unc-nlp/lxmert-base-uncased": 512,
}
UpperCamelCase_ = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
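# Usage sketch (the checkpoint name is real; the sentences are illustrative):
#
#   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   enc = tokenizer("a photo of a cat", "what animal is shown?", return_token_type_ids=True)
#   # token_type_ids: 0 for [CLS] + first sentence + [SEP], 1 for the second sentence + [SEP]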
| 358 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR the ciphertext against the (cycled) key; return None on any invalid character."""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-letter lowercase key and keep decodings made only of valid characters."""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: return the sum of the ASCII values of the decrypted text."""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
| 344 | 0 |
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`; record even-sized subtrees as cut points."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
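# Worked check for the sample above: dfs records even-sized subtrees under
# nodes 3 (size 2) and 6 (size 4); the root's size-10 component is recorded as
# well, so len(cuts) - 1 == 2 edges can be removed.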
| 359 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
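# Example of consuming the directory fixture in a test (illustrative):
#
#   def test_dummy_dataset_loads(dataset_loading_script_dir):
#       ds = datasets.load_dataset(dataset_loading_script_dir, split="train")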
| 344 | 0 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def A ( __UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = tf.train.list_variables(a__ )
UpperCAmelCase_ = {}
UpperCAmelCase_ = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
UpperCAmelCase_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase_ = tf.train.load_variable(a__ , a__ )
UpperCAmelCase_ = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
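    # Example invocation (the aeslc checkpoint path mirrors the default above and
    # is illustrative):
    #   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc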
| 360 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )  # the misspelled kwarg name is read for backward compatibility with older configs
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 344 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 361 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
        config.update(**kwargs)
return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
        assert sample.dtype == torch.float16
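# The config round-trips exercised above are the same mechanism used to swap
# solvers on an existing pipeline (illustrative):
#
#   pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)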
| 344 | 0 |
def move_tower(height, from_pole, to_pole, with_pole):
    """Move `height` disks from `from_pole` to `to_pole`, using `with_pole` as scratch space."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
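# The recursion performs 2**n - 1 moves for n disks; move_tower(3, "A", "B", "C")
# prints exactly 7 "moving disk" lines.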
| 362 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
        # Attaching a hook to a model that already has one replaces the old hook; hooks do not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule onto a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
        # We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will offload the weights of each submodule, executing on GPU 0 if available (else CPU)
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will offload the weights of every submodule, executing on GPU 0 if available (else CPU)
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will offload the weights of every submodule, executing on GPU 0 if available (else CPU)
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
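# Illustrative sketch (not part of the original test suite): the usage pattern the
# tests above exercise, condensed into one helper. Assumes the hook classes defined
# at the top of this file.
def _example_hook_usage():
    model = ModelForTest()
    hook = SequentialHook(PreForwardHook(), PostForwardHook())
    add_hook_to_module(model, hook)  # wraps model.forward with both hooks
    output = model(torch.randn(2, 3))  # the pre-hook shifts the input, the post-hook shifts the output
    remove_hook_from_module(model)  # restores the original forward
    return output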
| 344 | 0 |
def A ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = False
    while is_sorted is False: # keep looping until a full pass makes no swaps
UpperCAmelCase_ = True
for i in range(0 , len(lowercase__ ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
UpperCAmelCase_ , UpperCAmelCase_ = input_list[i + 1], input_list[i]
# swapping if elements not in order
UpperCAmelCase_ = False
for i in range(1 , len(lowercase__ ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
UpperCAmelCase_ , UpperCAmelCase_ = input_list[i + 1], input_list[i]
# swapping if elements not in order
UpperCAmelCase_ = False
return input_list
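# Example (illustrative): odd-even transposition sort of a small list.
#   odd_even_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]
# Each outer iteration does one even-indexed pass and one odd-indexed pass of
# neighbour swaps, and the loop stops once a full iteration makes no swap.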
if __name__ == "__main__":
print("Enter list to be sorted")
UpperCamelCase_ = [int(x) for x in input().split()]
# inputting the elements of the list on one line
UpperCamelCase_ = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 363 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 344 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
    'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class a_ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Union[str, Any] ="data2vec-audio"
def __init__( self :Optional[Any] , _lowercase :Optional[Any]=32 , _lowercase :List[Any]=768 , _lowercase :List[Any]=12 , _lowercase :str=12 , _lowercase :List[str]=3072 , _lowercase :Union[str, Any]="gelu" , _lowercase :Optional[int]=0.1 , _lowercase :Tuple=0.1 , _lowercase :List[Any]=0.1 , _lowercase :Dict=0.0 , _lowercase :Dict=0.1 , _lowercase :List[str]=0.1 , _lowercase :int=0.02 , _lowercase :Tuple=1E-5 , _lowercase :Optional[Any]="gelu" , _lowercase :Dict=(512, 512, 512, 512, 512, 512, 512) , _lowercase :Tuple=(5, 2, 2, 2, 2, 2, 2) , _lowercase :int=(10, 3, 3, 3, 3, 2, 2) , _lowercase :Any=False , _lowercase :Dict=16 , _lowercase :Dict=19 , _lowercase :Optional[Any]=5 , _lowercase :Union[str, Any]=0.05 , _lowercase :int=10 , _lowercase :str=2 , _lowercase :str=0.0 , _lowercase :Tuple=10 , _lowercase :Optional[Any]=0 , _lowercase :int="sum" , _lowercase :Optional[int]=False , _lowercase :List[Any]=False , _lowercase :Optional[int]=256 , _lowercase :List[Any]=(512, 512, 512, 512, 1500) , _lowercase :Dict=(5, 3, 3, 1, 1) , _lowercase :Optional[Any]=(1, 2, 3, 1, 1) , _lowercase :Optional[int]=512 , _lowercase :Optional[int]=0 , _lowercase :Optional[Any]=1 , _lowercase :Any=2 , _lowercase :str=False , _lowercase :Tuple=3 , _lowercase :Optional[int]=2 , _lowercase :Optional[int]=3 , _lowercase :Tuple=None , **_lowercase :Tuple , ) -> List[str]:
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = feat_extract_activation
UpperCAmelCase_ = list(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = list(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = list(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = conv_bias
UpperCAmelCase_ = num_conv_pos_embeddings
UpperCAmelCase_ = num_conv_pos_embedding_groups
UpperCAmelCase_ = conv_pos_kernel_size
UpperCAmelCase_ = len(self.conv_dim)
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = feat_proj_dropout
UpperCAmelCase_ = final_dropout
UpperCAmelCase_ = layerdrop
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ = mask_time_prob
UpperCAmelCase_ = mask_time_length
UpperCAmelCase_ = mask_time_min_masks
UpperCAmelCase_ = mask_feature_prob
UpperCAmelCase_ = mask_feature_length
UpperCAmelCase_ = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ = ctc_loss_reduction
UpperCAmelCase_ = ctc_zero_infinity
# adapter
UpperCAmelCase_ = add_adapter
UpperCAmelCase_ = adapter_kernel_size
UpperCAmelCase_ = adapter_stride
UpperCAmelCase_ = num_adapter_layers
UpperCAmelCase_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ = list(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = list(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = list(_SCREAMING_SNAKE_CASE)
UpperCAmelCase_ = xvector_output_dim
@property
def __a ( self :str) -> Optional[Any]:
return math.prod(self.conv_stride)
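# Minimal usage sketch (illustrative, mirroring the usual config/model pattern):
#   from transformers import Data2VecAudioConfig, Data2VecAudioModel
#   configuration = Data2VecAudioConfig()      # defaults in the style of facebook/data2vec-audio-base-960h
#   model = Data2VecAudioModel(configuration)  # randomly initialised model with that architecture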
| 364 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="levit"
def __init__( self :List[str] , _lowercase :List[Any]=224 , _lowercase :str=3 , _lowercase :Optional[int]=3 , _lowercase :str=2 , _lowercase :List[Any]=1 , _lowercase :str=16 , _lowercase :Dict=[128, 256, 384] , _lowercase :Union[str, Any]=[4, 8, 12] , _lowercase :Tuple=[4, 4, 4] , _lowercase :Dict=[16, 16, 16] , _lowercase :Any=0 , _lowercase :Dict=[2, 2, 2] , _lowercase :Any=[2, 2, 2] , _lowercase :Tuple=0.02 , **_lowercase :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] =version.parse("1.11" )
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def __a ( self :List[Any]) -> float:
return 1E-4
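# Minimal usage sketch (illustrative):
#   from transformers import LevitConfig, LevitModel
#   configuration = LevitConfig()        # defaults in the style of facebook/levit-128S
#   model = LevitModel(configuration)    # randomly initialised LeViT with that architecture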
| 344 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"unc-nlp/lxmert-base-uncased": 512,
}
UpperCamelCase_ = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class a_ ( __snake_case ):
UpperCamelCase__ : List[str] =VOCAB_FILES_NAMES
UpperCamelCase__ : List[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Dict =PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : str =LxmertTokenizer
def __init__( self :Dict , _lowercase :List[str]=None , _lowercase :Any=None , _lowercase :int=True , _lowercase :Optional[Any]="[UNK]" , _lowercase :Optional[int]="[SEP]" , _lowercase :Optional[int]="[PAD]" , _lowercase :List[Any]="[CLS]" , _lowercase :Any="[MASK]" , _lowercase :Union[str, Any]=True , _lowercase :Tuple=None , **_lowercase :Union[str, Any] , ) -> str:
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , lowerCamelCase_) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCamelCase_) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase_) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCamelCase_ , normalizer_state.pop('''type'''))
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCamelCase_)
UpperCAmelCase_ = do_lower_case
def __a ( self :Optional[int] , _lowercase :List[str] , _lowercase :List[str]=None) -> Optional[Any]:
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self :List[str] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def __a ( self :Tuple , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
UpperCAmelCase_ = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_)
return tuple(lowerCamelCase_)
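# Illustrative note: for a sequence pair, the methods above produce the BERT-style layout
#   tokens:         [CLS] A [SEP] B [SEP]
#   token_type_ids:   0   0   0   1   1
# i.e. the first segment (including [CLS] and its [SEP]) maps to 0 and the second to 1.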
| 365 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=None , __UpperCAmelCase="no" , __UpperCAmelCase="29500" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase_ = True
elif "IPython" in sys.modules:
UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
UpperCAmelCase_ = 8
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*__UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port=__UpperCAmelCase , mixed_precision=__UpperCAmelCase ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''MULTI_GPU''' )
print(f"Launching training on {num_processes} GPUs." )
try:
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCAmelCase_ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*__UpperCAmelCase )
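# Usage sketch (illustrative): from a notebook, one would define a training function and
# hand it to the launcher above, e.g.
#   def training_loop():
#       ...  # set up the Accelerator and train
#   launcher(training_loop, args=(), num_processes=2, mixed_precision="fp16")
# where `launcher` stands for the function defined above.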
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=2 ) -> Optional[Any]:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , debug=__UpperCAmelCase )
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
| 344 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() called twice from the
        # same program, and it breaks other tests that run from the same pytest worker. Therefore, until
        # this is sorted out, it must be run only in an external program, i.e. distributed=True in this
        # test and only under one or more gpus - a special test would be needed to cover cpu.
        #
        # Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call, it botches the future eval.
        #
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
        # test a 2nd time - was getting `eval_loss: nan`
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
        # as each sub-test is slow-ish, this is split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase :str) -> Tuple[int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`, which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # to 2 bytes, and the diff in optimizer memory usage is derived as follows:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB of total memory saved.
        #
        # Peak memory should be the same - the total should differ by about that same margin.
        #
        # After leaving a small margin to accommodate differences between gpus, let's check
        # that we have at least 120MB in savings.
UpperCAmelCase_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_lowercase , _lowercase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def __a ( self :Any , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :float = 3E-3 , _lowercase :str = "adafactor" , _lowercase :bool = False , _lowercase :str = None , _lowercase :int = 0 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :int = None , ) -> List[Any]:
UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowercase)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowercase)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
UpperCAmelCase_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowercase)}\n ".split()
UpperCAmelCase_ = '''
--do_predict
'''.split()
UpperCAmelCase_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase_ = get_gpu_count()
UpperCAmelCase_ = get_torch_dist_unique_port()
UpperCAmelCase_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
UpperCAmelCase_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env())
else:
UpperCAmelCase_ = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase):
main()
return output_dir
| 344 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Optional[int] , _lowercase :Optional[Any] , _lowercase :Optional[Any]=7 , _lowercase :List[Any]=3 , _lowercase :Tuple=30 , _lowercase :Optional[int]=400 , _lowercase :Any=True , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=True , _lowercase :int=[0.5, 0.5, 0.5] , _lowercase :Any=[0.5, 0.5, 0.5] , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=1 / 255 , _lowercase :int=True , ) -> List[str]:
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
def __a ( self :Optional[Any]) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __a ( self :Union[str, Any] , _lowercase :Optional[Any] , _lowercase :Optional[int]=False) -> List[Any]:
if not batched:
UpperCAmelCase_ = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * h / w)
UpperCAmelCase_ = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * w / h)
else:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = self.size['''shortest_edge''']
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_UpperCAmelCase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_UpperCAmelCase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class a_ ( lowerCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] =DetaImageProcessor if is_vision_available() else None
def __a ( self :List[str]) -> Optional[Any]:
UpperCAmelCase_ = DetaImageProcessingTester(self)
@property
def __a ( self :Dict) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> str:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean'''))
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std'''))
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize'''))
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize'''))
self.assertTrue(hasattr(_UpperCAmelCase , '''do_rescale'''))
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad'''))
self.assertTrue(hasattr(_UpperCAmelCase , '''size'''))
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333})
self.assertEqual(image_processor.do_pad , _UpperCAmelCase)
def __a ( self :str) -> Dict:
pass
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
UpperCAmelCase_ = image_processing(_UpperCAmelCase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :int) -> Dict:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_UpperCAmelCase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_UpperCAmelCase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __a ( self :List[str]) -> Any:
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
UpperCAmelCase_ = json.loads(f.read())
UpperCAmelCase_ = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCAmelCase_ = DetaImageProcessor()
UpperCAmelCase_ = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='''pt''')
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase)
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4))
# verify area
UpperCAmelCase_ = torch.tensor([5_887.9_600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase))
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase)
UpperCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1E-3))
# verify image_id
UpperCAmelCase_ = torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase))
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase))
# verify class_labels
UpperCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase))
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase))
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase))
@slow
def __a ( self :List[Any]) -> Optional[Any]:
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
UpperCAmelCase_ = json.loads(f.read())
UpperCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
UpperCAmelCase_ = DetaImageProcessor(format='''coco_panoptic''')
UpperCAmelCase_ = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='''pt''')
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase)
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4))
# verify area
UpperCAmelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5_879.6_562, 7_634.1_147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase))
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase)
UpperCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1E-3))
# verify image_id
UpperCAmelCase_ = torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase))
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase))
# verify class_labels
UpperCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase))
# verify masks
UpperCAmelCase_ = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _UpperCAmelCase)
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase))
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase))
| 367 |
import functools


def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """Levenshtein edit distance between two words, computed top-down with memoization."""
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if the first word's index overflows - delete the rest of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete the rest of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
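# Usage sketch (hedged): the classic "kitten" -> "sitting" instance needs three
# edits (substitute k->s, substitute e->i, append g):
#
#   >>> min_distance_up_bottom("kitten", "sitting")
#   3
#   >>> min_distance_up_bottom("", "abc")
#   3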
| 344 | 0 |
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items by value/weight ratio, splitting the last one."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
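# Usage sketch (hedged, classic instance): values [60, 100, 120] with weights
# [10, 20, 30] and capacity 50 -> take items 0 and 1 whole, then 2/3 of item 2:
#
#   >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   (240.0, [1, 1, 0.6666666666666666])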
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any ="left"
def __init__( self :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Tuple="<s>" , _lowercase :Any="</s>" , _lowercase :Dict="<unk>" , _lowercase :str="<sep>" , _lowercase :Tuple="<pad>" , _lowercase :Any="<cls>" , _lowercase :List[str]="<mask>" , _lowercase :Union[str, Any]=["<eop>", "<eod>"] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Union[str, Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = 3
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :int) -> List[Any]:
return len(self.sp_model)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __a ( self :List[str] , _lowercase :Tuple) -> Optional[int]:
if self.remove_space:
UpperCAmelCase_ = ''' '''.join(inputs.strip().split())
else:
UpperCAmelCase_ = inputs
UpperCAmelCase_ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCAmelCase_ = unicodedata.normalize('''NFKD''' , _lowercase)
UpperCAmelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(_lowercase)])
if self.do_lower_case:
UpperCAmelCase_ = outputs.lower()
return outputs
def __a ( self :str , _lowercase :str) -> List[str]:
UpperCAmelCase_ = self.preprocess_text(_lowercase)
UpperCAmelCase_ = self.sp_model.encode(_lowercase , out_type=_lowercase)
UpperCAmelCase_ = []
for piece in pieces:
if len(_lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase_ = cur_pieces[1:]
else:
UpperCAmelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowercase)
else:
new_pieces.append(_lowercase)
return new_pieces
def __a ( self :Optional[Any] , _lowercase :Union[str, Any]) -> Tuple:
return self.sp_model.PieceToId(_lowercase)
def __a ( self :Optional[int] , _lowercase :Optional[Any]) -> List[str]:
return self.sp_model.IdToPiece(_lowercase)
def __a ( self :List[Any] , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = ''''''.join(_lowercase).replace(_lowercase , ''' ''').strip()
return out_string
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :bool = False , _lowercase :bool = None , _lowercase :bool = True , **_lowercase :Tuple , ) -> str:
UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _lowercase)
UpperCAmelCase_ = self.convert_ids_to_tokens(_lowercase , skip_special_tokens=_lowercase)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
UpperCAmelCase_ = []
sub_texts.append(_lowercase)
else:
current_sub_text.append(_lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase_ = ''''''.join(_lowercase)
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(_lowercase)
return clean_text
else:
return text
def __a ( self :str , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self :Dict , _lowercase :List[int] , _lowercase :Optional[List[int]] = None , _lowercase :bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is not None:
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1, 1]
return ([0] * len(_lowercase)) + [1, 1]
def __a ( self :Optional[int] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __a ( self :str , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
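# Usage sketch (hedged): assuming this class is exported as XLNetTokenizer, note
# that XLNet appends its special tokens at the END of the sequence, i.e. a pair
# of sequences is encoded as A <sep> B <sep> <cls> rather than the BERT-style
# [CLS] A [SEP] B [SEP]:
#
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
#   # -> [10, 11, sep_id, 20, 21, sep_id, cls_id]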
| 344 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class a_ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : List[Any] =StableDiffusionPanoramaPipeline
UpperCamelCase__ : Union[str, Any] =TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ : Tuple =TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : List[str] =TEXT_TO_IMAGE_IMAGE_PARAMS
def __a ( self :List[str]) -> Union[str, Any]:
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase_ = DDIMScheduler()
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ = CLIPTextModel(lowercase_)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __a ( self :int , _lowercase :Optional[int] , _lowercase :List[Any]=0) -> List[str]:
UpperCAmelCase_ = torch.manual_seed(lowercase_)
UpperCAmelCase_ = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :Tuple) -> Any:
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableDiffusionPanoramaPipeline(**lowercase_)
UpperCAmelCase_ = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
UpperCAmelCase_ = self.get_dummy_inputs(lowercase_)
UpperCAmelCase_ = sd_pipe(**lowercase_).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def __a ( self :List[Any]) -> Optional[int]:
super().test_inference_batch_consistent(batch_sizes=[1, 2])
def __a ( self :List[str]) -> Union[str, Any]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3)
def __a ( self :Optional[int]) -> int:
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableDiffusionPanoramaPipeline(**lowercase_)
UpperCAmelCase_ = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
UpperCAmelCase_ = self.get_dummy_inputs(lowercase_)
UpperCAmelCase_ = '''french fries'''
UpperCAmelCase_ = sd_pipe(**lowercase_ , negative_prompt=lowercase_)
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableDiffusionPanoramaPipeline(**lowercase_)
UpperCAmelCase_ = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
UpperCAmelCase_ = self.get_dummy_inputs(lowercase_)
UpperCAmelCase_ = sd_pipe(**lowercase_ , view_batch_size=2)
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''')
UpperCAmelCase_ = StableDiffusionPanoramaPipeline(**lowercase_)
UpperCAmelCase_ = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
UpperCAmelCase_ = self.get_dummy_inputs(lowercase_)
UpperCAmelCase_ = sd_pipe(**lowercase_).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=lowercase_)
UpperCAmelCase_ = StableDiffusionPanoramaPipeline(**lowercase_)
UpperCAmelCase_ = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
UpperCAmelCase_ = self.get_dummy_inputs(lowercase_)
UpperCAmelCase_ = sd_pipe(**lowercase_).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def __a ( self :Optional[int]) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] , _lowercase :Any=0) -> Optional[int]:
UpperCAmelCase_ = torch.manual_seed(lowercase_)
UpperCAmelCase_ = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :int) -> Optional[int]:
UpperCAmelCase_ = '''stabilityai/stable-diffusion-2-base'''
UpperCAmelCase_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''')
UpperCAmelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
UpperCAmelCase_ = self.get_inputs()
UpperCAmelCase_ = pipe(**lowercase_).images
UpperCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
UpperCAmelCase_ = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
])
assert np.abs(expected_slice - image_slice).max() < 1E-2
def __a ( self :Tuple) -> List[Any]:
UpperCAmelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=lowercase_)
UpperCAmelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
UpperCAmelCase_ = self.get_inputs()
UpperCAmelCase_ = pipe(**lowercase_).images
UpperCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
UpperCAmelCase_ = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def __a ( self :Any) -> Any:
UpperCAmelCase_ = 0
def callback_fn(_lowercase :int , _lowercase :int , _lowercase :torch.FloatTensor) -> None:
UpperCAmelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
UpperCAmelCase_ = latents[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
elif step == 2:
UpperCAmelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
UpperCAmelCase_ = latents[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
UpperCAmelCase_ = False
UpperCAmelCase_ = '''stabilityai/stable-diffusion-2-base'''
UpperCAmelCase_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''')
UpperCAmelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
UpperCAmelCase_ = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
UpperCAmelCase_ = self.get_inputs()
pipe(**lowercase_ , callback=lowercase_ , callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self :Optional[Any]) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = '''stabilityai/stable-diffusion-2-base'''
UpperCAmelCase_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''')
UpperCAmelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
UpperCAmelCase_ = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = self.get_inputs()
UpperCAmelCase_ = pipe(**lowercase_)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , _lowercase :Optional[int]=224 , _lowercase :List[str]=4 , _lowercase :Tuple=3 , _lowercase :List[Any]=96 , _lowercase :Any=[2, 2, 6, 2] , _lowercase :int=[3, 6, 12, 24] , _lowercase :List[Any]=7 , _lowercase :Dict=4.0 , _lowercase :Any=True , _lowercase :int=0.0 , _lowercase :List[Any]=0.0 , _lowercase :Tuple=0.1 , _lowercase :str="gelu" , _lowercase :Union[str, Any]=False , _lowercase :Tuple=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[str]=None , _lowercase :Any=None , **_lowercase :Any , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_lowercase) - 1))
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_lowercase) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
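# Usage sketch (hedged): assuming this config class is exported as
# MaskFormerSwinConfig and accepts these keyword arguments, a backbone returning
# two intermediate stages might be configured as follows; `hidden_size` is
# derived from `embed_dim` and the number of stages, not passed in:
#
#   config = MaskFormerSwinConfig(
#       embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
#       out_features=["stage2", "stage4"],
#   )
#   assert config.hidden_size == 96 * 2 ** (4 - 1)  # 768, channels after stage 4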
| 344 | 0 |
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit, i.e. the bit length of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
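# Usage sketch (hedged): the result equals int.bit_length for non-negative ints:
#
#   >>> get_highest_set_bit_position(8)  # 8 == 0b1000, highest set bit at position 4
#   4
#   >>> get_highest_set_bit_position(0)
#   0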
| 370 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def A ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
return TrainCommand(__UpperCAmelCase )
class a_ ( _snake_case ):
@staticmethod
def __a ( _lowercase :ArgumentParser) -> List[Any]:
UpperCAmelCase_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
train_parser.add_argument(
'''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''')
train_parser.add_argument(
'''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''')
train_parser.add_argument(
'''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''')
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''')
train_parser.add_argument(
'''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to saved the trained model.''')
train_parser.add_argument(
'''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''')
train_parser.add_argument(
'''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''')
train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''')
train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''')
train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''')
train_parser.set_defaults(func=_lowercase)
def __init__( self :Union[str, Any] , _lowercase :Namespace) -> Union[str, Any]:
UpperCAmelCase_ = logging.get_logger('''transformers-cli/training''')
UpperCAmelCase_ = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_lowercase)
UpperCAmelCase_ = args.output
UpperCAmelCase_ = args.column_label
UpperCAmelCase_ = args.column_text
UpperCAmelCase_ = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
UpperCAmelCase_ = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = args.validation_split
UpperCAmelCase_ = args.train_batch_size
UpperCAmelCase_ = args.valid_batch_size
UpperCAmelCase_ = args.learning_rate
UpperCAmelCase_ = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
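# Usage sketch (hedged; paths and column indices below are illustrative only):
#
#   transformers-cli train \
#       --train_data ./data/train.csv --column_label 0 --column_text 1 \
#       --model bert-base-uncased --train_batch_size 32 --output ./trained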
| 344 | 0 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def A ( __UpperCAmelCase = "dhaka" , __UpperCAmelCase = 5 ) -> str:
'''simple docstring'''
UpperCAmelCase_ = min(__UpperCAmelCase , 50 ) # Prevent abuse!
UpperCAmelCase_ = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
UpperCAmelCase_ = requests.get('''https://www.google.com/search''' , params=__UpperCAmelCase , headers=__UpperCAmelCase )
UpperCAmelCase_ = BeautifulSoup(html.text , '''html.parser''' )
UpperCAmelCase_ = ''''''.join(
re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
UpperCAmelCase_ = json.dumps(__UpperCAmelCase )
UpperCAmelCase_ = json.loads(__UpperCAmelCase )
UpperCAmelCase_ = re.findall(
r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , __UpperCAmelCase , )
if not matched_google_image_data:
return 0
UpperCAmelCase_ = re.sub(
r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(__UpperCAmelCase ) , )
UpperCAmelCase_ = re.findall(
r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , __UpperCAmelCase , )
for index, fixed_full_res_image in enumerate(__UpperCAmelCase ):
if index >= max_images:
return index
UpperCAmelCase_ = bytes(__UpperCAmelCase , '''ascii''' ).decode(
'''unicode-escape''' )
UpperCAmelCase_ = bytes(__UpperCAmelCase , '''ascii''' ).decode(
'''unicode-escape''' )
UpperCAmelCase_ = urllib.request.build_opener()
UpperCAmelCase_ = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(__UpperCAmelCase )
UpperCAmelCase_ = f"query_{query.replace(' ' , '_' )}"
if not os.path.exists(__UpperCAmelCase ):
os.makedirs(__UpperCAmelCase )
urllib.request.urlretrieve( # noqa: S310
__UpperCAmelCase , f"{path_name}/original_size_img_{index}.jpg" )
return index
if __name__ == "__main__":
try:
UpperCamelCase_ = download_images_from_google_query(sys.argv[1])
print(f"{image_count} images were downloaded to disk.")
except IndexError:
print("Please provide a search term.")
raise
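# Usage sketch (hedged; Google's result markup changes often, so the scraping
# regexes above are brittle by nature). Assuming the file is saved as
# download_images_from_google_query.py:
#
#   python download_images_from_google_query.py "blue lotus"
#   # -> saves up to 5 images under ./query_blue_lotus/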
| 371 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
def __init__( self :Optional[int] , _lowercase :Any , _lowercase :List[Any]=3 , _lowercase :Any=32 , _lowercase :str=3 , _lowercase :Union[str, Any]=10 , _lowercase :List[Any]=[10, 20, 30, 40] , _lowercase :str=[1, 1, 2, 1] , _lowercase :List[str]=True , _lowercase :Dict=True , _lowercase :Any="relu" , _lowercase :List[Any]=3 , _lowercase :Optional[Any]=None , ) -> int:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(_lowercase)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels)
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def __a ( self :List[Any]) -> Optional[int]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __a ( self :Union[str, Any] , _lowercase :List[Any] , _lowercase :List[str] , _lowercase :Any) -> List[Any]:
UpperCAmelCase_ = TFResNetModel(config=_lowercase)
UpperCAmelCase_ = model(_lowercase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self :int , _lowercase :Dict , _lowercase :Optional[int] , _lowercase :List[str]) -> Any:
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = TFResNetForImageClassification(_lowercase)
UpperCAmelCase_ = model(_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __a ( self :List[str]) -> str:
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
UpperCamelCase__ : Tuple =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ : str =(
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ : List[Any] =False
UpperCamelCase__ : Union[str, Any] =False
UpperCamelCase__ : int =False
UpperCamelCase__ : Any =False
UpperCamelCase__ : List[str] =False
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = TFResNetModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase)
def __a ( self :List[Any]) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self :List[str]) -> int:
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''')
def __a ( self :int) -> Any:
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''')
def __a ( self :int) -> int:
pass
def __a ( self :Optional[Any]) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase)
UpperCAmelCase_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase)
def __a ( self :str) -> Optional[int]:
def check_hidden_states_output(_lowercase :str , _lowercase :Tuple , _lowercase :List[Any]):
UpperCAmelCase_ = model_class(_lowercase)
UpperCAmelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase))
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(_lowercase) , expected_num_stages + 1)
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ = layer_type
UpperCAmelCase_ = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase)
def __a ( self :Dict) -> str:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase)
@slow
def __a ( self :Dict) -> str:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = TFResNetModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
def A ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def __a ( self :Optional[int]) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_lowercase , return_tensors='''tf''')
# forward pass
UpperCAmelCase_ = model(**_lowercase)
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , _lowercase)
UpperCAmelCase_ = tf.constant([-11.1_069, -9.7_877, -8.3_777])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowercase , atol=1E-4))
| 350 |
def solution(limit: int = 1_000_000) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, via a sieve."""
    phi = [i - 1 for i in range(limit + 1)]  # phi[p] == p - 1 exactly when p is prime
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
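# Sanity check (hedged): the sieve leaves the exact totients behind, e.g.
# phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4, so solution(10) == 31.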
| 344 | 0 |
def solution() -> int:
    """Product a * b * c of the Pythagorean triplet for which a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f"{solution() = }")
| 351 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
| 344 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCamelCase_ = TypeVar("T")
UpperCamelCase_ = Union[List[T], Tuple[T, ...]]
UpperCamelCase_ = Union[T, List[T], Dict[str, T]]
UpperCamelCase_ = Union[str, bytes, os.PathLike]
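# Example (hedged): NestedDataStructureLike[int] accepts a bare value, a list,
# or a dict of values -- e.g. 3, [3, 4] or {"a": 3} -- which is how dataset
# utilities type inputs that may or may not be batched.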
| 352 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class a_ :
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
        elif isinstance(value , bytes):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                if value.get('''bytes'''):
                    bytes_value = np.frombuffer(value['''bytes'''] , dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer , bytes_value , value['''sampling_rate'''] , format='''wav''')
                return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example( self , value :dict , token_per_repo_id :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
        path , file = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split('''::''')[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL)['''repo_id''']
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , '''rb''' , use_auth_token=use_auth_token) as f:
                array , sampling_rate = sf.read(f)
        else:
            array , sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
    def cast_storage( self , storage :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('''bytes''') >= 0:
                bytes_array = storage.field('''bytes''')
            else:
                bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            if storage.type.get_field_index('''path''') >= 0:
                path_array = storage.field('''path''')
            else:
                path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
        return array_cast(storage , self.pa_type)
    def embed_storage( self , storage :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
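
# A hedged, illustrative sketch: round-trip a synthetic sine wave through the
# feature above, assuming it is the `datasets` Audio feature. Requires
# `soundfile` (and `librosa` for decoding); the decoded array comes back as a
# float waveform at the original sampling rate.
if __name__ == "__main__":
    wave = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000).astype(np.float32)
    feature = Audio()
    encoded = feature.encode_example({"array": wave, "sampling_rate": 16000})
    assert encoded["bytes"] is not None and encoded["path"] is None
    decoded = feature.decode_example(encoded)
    print(decoded["sampling_rate"], decoded["array"].shape)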
from functools import reduce
UpperCamelCase_ = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution( __UpperCAmelCase = UpperCamelCase_ ) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , __UpperCAmelCase[i : i + 13] ) )
        for i in range(len(__UpperCAmelCase ) - 12 ) )
if __name__ == "__main__":
    print(f"{solution() = }")
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=DummyObject ):
    _backends = ["torch", "scipy"]
def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]:
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
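
# A hedged sketch: dummies like the one above mirror the real API surface so
# that `import transformers` succeeds without torch/scipy installed; the
# ImportError is only raised when the object is actually touched.
if __name__ == "__main__":
    try:
        a_()  # raises unless both torch and scipy are installed
    except ImportError as err:
        print(err)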
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class a_ ( unittest.TestCase ):
def __a ( self :int) -> Any:
UpperCAmelCase_ = logging.get_logger()
# the current default level is logging.WARNING
UpperCAmelCase_ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
# restore to the original level
logging.set_verbosity(_lowercase)
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = logging.get_verbosity()
UpperCAmelCase_ = logging.get_logger('''transformers.models.bart.tokenization_bart''')
UpperCAmelCase_ = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_lowercase) as cl:
logger.warning(_lowercase)
self.assertEqual(cl.out , msg + '''\n''')
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_lowercase) as cl:
logger.warning(_lowercase)
self.assertEqual(cl.out , '''''')
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_lowercase) as cl:
logger.warning(_lowercase)
self.assertEqual(cl.out , msg + '''\n''')
# restore to the original level
logging.set_verbosity(_lowercase)
@mockenv(TRANSFORMERS_VERBOSITY='''error''')
def __a ( self :str) -> List[Any]:
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCAmelCase_ = logging.get_logger('''transformers.models.bart.tokenization_bart''')
UpperCAmelCase_ = os.getenv('''TRANSFORMERS_VERBOSITY''' , _lowercase)
UpperCAmelCase_ = logging.log_levels[env_level_str]
UpperCAmelCase_ = logging.get_verbosity()
self.assertEqual(
_lowercase , _lowercase , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
# restore to the original level
UpperCAmelCase_ = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''')
def __a ( self :Dict) -> int:
transformers.utils.logging._reset_library_root_logger()
UpperCAmelCase_ = logging.logging.getLogger()
with CaptureLogger(_lowercase) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''')
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out)
# no need to restore as nothing was changed
def __a ( self :int) -> List[Any]:
transformers.utils.logging._reset_library_root_logger()
UpperCAmelCase_ = logging.get_logger('''transformers.models.bart.tokenization_bart''')
UpperCAmelCase_ = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1'''):
# nothing should be logged as env var disables this method
with CaptureLogger(_lowercase) as cl:
logger.warning_advice(_lowercase)
self.assertEqual(cl.out , '''''')
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''''''):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_lowercase) as cl:
logger.warning_advice(_lowercase)
self.assertEqual(cl.out , msg + '''\n''')
def A ( ) -> Dict:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
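
# A hedged usage sketch of the verbosity API exercised above; the same effect
# is available via the TRANSFORMERS_VERBOSITY environment variable (one of:
# debug, info, warning, error, critical).
if __name__ == "__main__":
    logging.set_verbosity_info()
    example_logger = logging.get_logger("transformers")
    example_logger.info("now visible at info level")
    logging.set_verbosity_warning()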
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    '''simple docstring'''
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14 )
    restype_atom37_to_atom14_list.append([0] * 37 )
    restype_atom14_mask_list.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein['''aatype'''].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein['''aatype'''].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein['''aatype'''].device , )
    protein_aatype = protein['''aatype'''].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein['''atom14_atom_exists'''] = residx_atom14_mask
    protein['''residx_atom14_to_atom37'''] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein['''residx_atom37_to_atom14'''] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein['''aatype'''].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein['''atom37_atom_exists'''] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    '''simple docstring'''
    batch = tree_map(lambda n: torch.tensor(n , device=batch['''aatype'''].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atom14_masks(batch ) )
    return out
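
# A hedged usage sketch for the helpers above: a "protein" is just a dict
# holding an `aatype` tensor of residue-type indices; the torch variant adds
# the atom14/atom37 index and existence-mask tensors in place.
if __name__ == "__main__":
    example = {"aatype": torch.zeros(8, dtype=torch.long)}  # 8 alanine residues
    example = make_atom14_masks(example)
    print(sorted(example.keys()))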
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def A ( ) -> Any:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase_ = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , __UpperCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def A ( ) -> List[str]:
'''simple docstring'''
assert _test_patching.open is open
UpperCAmelCase_ = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , __UpperCAmelCase ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def A ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , __UpperCAmelCase ):
pass
def A ( ) -> int:
'''simple docstring'''
UpperCAmelCase_ = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , __UpperCAmelCase ) is None
with patch_submodule(_test_patching , '''len''' , __UpperCAmelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def A ( ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = '''__test_patch_submodule_start_and_stop_mock__'''
UpperCAmelCase_ = patch_submodule(_test_patching , '''open''' , __UpperCAmelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def A ( ) -> Any:
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
UpperCAmelCase_ = '''__test_patch_submodule_successive_join__'''
UpperCAmelCase_ = '''__test_patch_submodule_successive_dirname__'''
UpperCAmelCase_ = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , __UpperCAmelCase ):
with patch_submodule(_test_patching , '''os.rename''' , __UpperCAmelCase ):
with patch_submodule(_test_patching , '''os.path.dirname''' , __UpperCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , __UpperCAmelCase ):
with patch_submodule(_test_patching , '''os.path.join''' , __UpperCAmelCase ):
with patch_submodule(_test_patching , '''os.path.dirname''' , __UpperCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def A ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , __UpperCAmelCase ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , __UpperCAmelCase ):
pass
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a_ ( PipelineTool ):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]
    def encode( self , audio):
        return self.pre_processor(audio , return_tensors='''pt''').input_features
    def forward( self , inputs):
        return self.model.generate(inputs=inputs)
    def decode( self , outputs):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True)[0]
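
# A hedged usage sketch: as a PipelineTool, the transcriber is called directly
# on an audio array; __call__ chains encode -> forward -> decode and downloads
# the checkpoint on first use, so this is illustrative rather than a cheap test.
if __name__ == "__main__":
    import numpy as np

    tool = a_()  # i.e. the speech-to-text tool defined above
    print(tool(np.zeros(16000, dtype="float32")))  # one second of silence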
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCamelCase_ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a_ ( datasets.BuilderConfig ):
UpperCamelCase__ : List[Any] =None
def A ( __UpperCAmelCase , __UpperCAmelCase , ) -> Tuple:
'''simple docstring'''
import pyspark
def generate_fn():
UpperCAmelCase_ = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
UpperCAmelCase_ = df_with_partition_id.select('''*''' ).where(f"part_id = {partition_id}" ).drop('''part_id''' )
UpperCAmelCase_ = partition_df.collect()
UpperCAmelCase_ = 0
for row in rows:
yield f"{partition_id}_{row_id}", row.asDict()
row_id += 1
return generate_fn
class a_ ( _BaseExamplesIterable ):
def __init__( self :Optional[int] , _lowercase :"pyspark.sql.DataFrame" , _lowercase :Dict=None , ) -> Tuple:
UpperCAmelCase_ = df
UpperCAmelCase_ = partition_order or range(self.df.rdd.getNumPartitions())
UpperCAmelCase_ = _generate_iterable_examples(self.df , self.partition_order)
def __iter__( self :Tuple) -> List[Any]:
yield from self.generate_examples_fn()
def __a ( self :List[Any] , _lowercase :np.random.Generator) -> Optional[int]:
UpperCAmelCase_ = list(range(self.df.rdd.getNumPartitions()))
generator.shuffle(lowercase_)
return SparkExamplesIterable(self.df , partition_order=lowercase_)
def __a ( self :int , _lowercase :int , _lowercase :int) -> Union[str, Any]:
UpperCAmelCase_ = self.split_shard_indices_by_worker(lowercase_ , lowercase_)
return SparkExamplesIterable(self.df , partition_order=lowercase_)
@property
def __a ( self :str) -> Any:
return len(self.partition_order)
class a_ ( datasets.DatasetBuilder ):
UpperCamelCase__ : Dict =SparkConfig
def __init__( self :List[str] , _lowercase :"pyspark.sql.DataFrame" , _lowercase :str = None , _lowercase :str = None , **_lowercase :Tuple , ) -> Dict:
import pyspark
UpperCAmelCase_ = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCAmelCase_ = df
UpperCAmelCase_ = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash()) , **lowercase_ , )
def __a ( self :Dict) -> List[str]:
# Returns the path of the created file.
def create_cache_and_write_probe(_lowercase :List[Any]):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_)
            probe_file = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , '''a''')
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''').startswith('''local'''):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCAmelCase_ = (
self._spark.sparkContext.parallelize(range(1) , 1).mapPartitions(lowercase_).collect()
)
if os.path.isfile(probe[0]):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''')
def __a ( self :Any) -> str:
return datasets.DatasetInfo(features=self.config.features)
def __a ( self :Any , _lowercase :datasets.download.download_manager.DownloadManager) -> Optional[int]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def __a ( self :Optional[int] , _lowercase :List[str]) -> List[Any]:
import pyspark
def get_arrow_batch_size(_lowercase :str):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]})
UpperCAmelCase_ = self.df.count()
UpperCAmelCase_ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCAmelCase_ = (
self.df.limit(lowercase_)
.repartition(1)
.mapInArrow(lowercase_ , '''batch_bytes: long''')
.agg(pyspark.sql.functions.sum('''batch_bytes''').alias('''sample_bytes'''))
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCAmelCase_ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCAmelCase_ = min(lowercase_ , int(approx_total_size / max_shard_size))
UpperCAmelCase_ = self.df.repartition(lowercase_)
def __a ( self :Any , _lowercase :str , _lowercase :str , _lowercase :int , ) -> Optional[int]:
import pyspark
UpperCAmelCase_ = ParquetWriter if file_format == '''parquet''' else ArrowWriter
UpperCAmelCase_ = os.path.join(self._working_dir , os.path.basename(lowercase_)) if self._working_dir else fpath
UpperCAmelCase_ = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCAmelCase_ = self.config.features
UpperCAmelCase_ = self._writer_batch_size
UpperCAmelCase_ = self._fs.storage_options
def write_arrow(_lowercase :Any):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCAmelCase_ = pyspark.TaskContext().taskAttemptId()
UpperCAmelCase_ = next(lowercase_ , lowercase_)
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
UpperCAmelCase_ = 0
UpperCAmelCase_ = writer_class(
features=lowercase_ , path=working_fpath.replace('''SSSSS''' , f"{shard_id:05d}").replace('''TTTTT''' , f"{task_id:05d}") , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
UpperCAmelCase_ = pa.Table.from_batches([first_batch])
writer.write_table(lowercase_)
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCAmelCase_ , UpperCAmelCase_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
UpperCAmelCase_ = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f"{shard_id:05d}").replace('''TTTTT''' , f"{task_id:05d}") , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
UpperCAmelCase_ = pa.Table.from_batches([batch])
writer.write_table(lowercase_)
if writer._num_bytes > 0:
UpperCAmelCase_ , UpperCAmelCase_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_)):
UpperCAmelCase_ = os.path.join(os.path.dirname(lowercase_) , os.path.basename(lowercase_))
shutil.move(lowercase_ , lowercase_)
UpperCAmelCase_ = (
self.df.mapInArrow(lowercase_ , '''task_id: long, num_examples: long, num_bytes: long''')
.groupBy('''task_id''')
.agg(
pyspark.sql.functions.sum('''num_examples''').alias('''total_num_examples''') , pyspark.sql.functions.sum('''num_bytes''').alias('''total_num_bytes''') , pyspark.sql.functions.count('''num_bytes''').alias('''num_shards''') , pyspark.sql.functions.collect_list('''num_examples''').alias('''shard_lengths''') , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __a ( self :Tuple , _lowercase :"datasets.SplitGenerator" , _lowercase :str = "arrow" , _lowercase :Optional[Union[str, int]] = None , _lowercase :Optional[int] = None , **_lowercase :Tuple , ) -> List[str]:
self._validate_cache_dir()
UpperCAmelCase_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
self._repartition_df_if_needed(lowercase_)
UpperCAmelCase_ = not is_remote_filesystem(self._fs)
UpperCAmelCase_ = os.path.join if is_local else posixpath.join
UpperCAmelCase_ = '''-TTTTT-SSSSS-of-NNNNN'''
UpperCAmelCase_ = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
UpperCAmelCase_ = path_join(self._output_dir , lowercase_)
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_):
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards))
all_shard_lengths.extend(lowercase_)
UpperCAmelCase_ = total_num_examples
UpperCAmelCase_ = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
UpperCAmelCase_ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCAmelCase_ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_lowercase :int , _lowercase :int , _lowercase :int , ):
rename(
lowercase_ , fpath.replace('''SSSSS''' , f"{shard_id:05d}").replace('''TTTTT''' , f"{task_id:05d}") , fpath.replace('''TTTTT-SSSSS''' , f"{global_shard_id:05d}").replace('''NNNNN''' , f"{total_shards:05d}") , )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
for i in range(len(lowercase_)):
UpperCAmelCase_ , UpperCAmelCase_ = task_id_and_num_shards[i]
for shard_id in range(lowercase_):
args.append([task_id, shard_id, global_shard_id])
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_)).map(lambda _lowercase: _rename_shard(*lowercase_)).collect()
else:
# don't use any pattern
UpperCAmelCase_ = 0
UpperCAmelCase_ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f"{shard_id:05d}").replace('''TTTTT''' , f"{task_id:05d}") , fpath.replace(lowercase_ , '''''') , )
def __a ( self :Tuple , _lowercase :"datasets.SplitGenerator" , ) -> Optional[Any]:
return SparkExamplesIterable(self.df)
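
# A hedged usage sketch: this builder backs `Dataset.from_spark` (available in
# recent `datasets` releases), which is the usual public entry point; the
# builder writes one or more Arrow shards per Spark partition under cache_dir.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([(i, str(i)) for i in range(10)], "id: long, text: string")
    ds = Dataset.from_spark(df)
    print(ds)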
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
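
# A hedged sketch: with the _LazyModule indirection above, heavy submodules are
# imported only when an attribute is first accessed, so the import below stays
# cheap until a model class is actually touched.
if __name__ == "__main__":
    from transformers import OPTConfig

    config = OPTConfig()  # materializes only the configuration submodule
    print(config.model_type)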
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__( self , pos_x , pos_y , goal_x , goal_y , parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__( self , start , goal):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None)
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None)
        self.node_queue = [self.start]
        self.reached = False
    def search( self ):
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent))
        return successors
    def retrace_path( self , node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__( self , start , goal):
        self.fwd_bfs = BreadthFirstSearch(start , goal)
        self.bwd_bfs = BreadthFirstSearch(goal , start)
        self.reached = False
    def search( self ):
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path( self , fwd_node , bwd_node):
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __a ( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    '''simple docstring'''
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    '''simple docstring'''
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(key , ciphertext )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    '''simple docstring'''
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    '''simple docstring'''
    data = Path(__file__).parent.joinpath(filename ).read_text(encoding='''utf-8''' )
    ciphertext = [int(number ) for number in data.strip().split(''',''' )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
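
# A hedged worked example: XOR with a repeating three-letter key is an
# involution, so decoding with the correct key recovers the plaintext exactly.
if __name__ == "__main__":
    plaintext = "the quick brown fox"
    key = (ord("a"), ord("b"), ord("c"))
    cipher = [ord(char) ^ k for char, k in zip(plaintext, cycle(key))]
    assert try_key(key, cipher) == plaintext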
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
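
# A hedged worked example for the functions above: a 4x4 input with size=2 and
# stride=2 pools down to 2x2; avgpooling truncates to int by construction.
if __name__ == "__main__":
    example = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    print(maxpooling(example, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
    print(avgpooling(example, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]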
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def A ( ) -> str:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def A ( ) -> Any:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = dataset_loading_script_name
UpperCAmelCase_ = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__UpperCAmelCase )
UpperCAmelCase_ = script_dir / f"{script_name}.py"
with open(__UpperCAmelCase , '''w''' ) as f:
f.write(__UpperCAmelCase )
return str(__UpperCAmelCase )
from datetime import datetime as dt
import os
from github import Github
UpperCamelCase_ = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCAmelCase_ = g.get_repo('''huggingface/transformers''' )
UpperCAmelCase_ = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCAmelCase_ = sorted([comment for comment in issue.get_comments()] , key=lambda __UpperCAmelCase : i.created_at , reverse=__UpperCAmelCase )
UpperCAmelCase_ = comments[0] if len(__UpperCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class a_ ( PretrainedConfig ):
    model_type = "open-llama"
def __init__( self :Union[str, Any] , _lowercase :List[Any]=100000 , _lowercase :Dict=4096 , _lowercase :List[Any]=11008 , _lowercase :Optional[int]=32 , _lowercase :Union[str, Any]=32 , _lowercase :List[str]="silu" , _lowercase :Union[str, Any]=2048 , _lowercase :Any=0.02 , _lowercase :Optional[Any]=1E-6 , _lowercase :str=True , _lowercase :str=0 , _lowercase :Any=1 , _lowercase :Optional[Any]=2 , _lowercase :str=False , _lowercase :Dict=True , _lowercase :Optional[Any]=0.1 , _lowercase :Tuple=0.1 , _lowercase :Dict=True , _lowercase :List[Any]=True , _lowercase :Dict=None , **_lowercase :Optional[int] , ) -> List[Any]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = kwargs.pop(
'''use_memorry_efficient_attention''' , _lowercase)
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_dropout_prob
UpperCAmelCase_ = use_stable_embedding
UpperCAmelCase_ = shared_input_output_embedding
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase , )
def __a ( self :int) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowercase) or len(self.rope_scaling) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}")
UpperCAmelCase_ = self.rope_scaling.get('''type''' , _lowercase)
UpperCAmelCase_ = self.rope_scaling.get('''factor''' , _lowercase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(_lowercase , _lowercase) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 344 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a_ :
def __init__( self :List[str] , _lowercase :Any , _lowercase :Tuple=13 , _lowercase :Union[str, Any]=7 , _lowercase :Dict=True , _lowercase :str=True , _lowercase :int=False , _lowercase :Optional[Any]=True , _lowercase :Dict=99 , _lowercase :Dict=32 , _lowercase :Optional[int]=5 , _lowercase :Optional[int]=4 , _lowercase :str=37 , _lowercase :List[Any]="gelu" , _lowercase :Optional[Any]=0.1 , _lowercase :Tuple=0.1 , _lowercase :List[Any]=512 , _lowercase :Tuple=16 , _lowercase :List[Any]=2 , _lowercase :List[Any]=0.02 , _lowercase :List[str]=3 , _lowercase :str=4 , _lowercase :str=None , ) -> Dict:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self :Optional[int]) -> List[Any]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def __a ( self :List[str] , _lowercase :List[str] , _lowercase :Dict , _lowercase :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :Optional[int] , _lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = LlamaModel(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCAmelCase_ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)
UpperCAmelCase_ = model(lowerCamelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :List[str] , _lowercase :List[Any] , _lowercase :Optional[int] , _lowercase :Tuple , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :Union[str, Any] , ) -> List[Any]:
UpperCAmelCase_ = True
UpperCAmelCase_ = LlamaModel(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCAmelCase_ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
UpperCAmelCase_ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )
UpperCAmelCase_ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __a ( self :Dict , _lowercase :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :str , _lowercase :Union[str, Any] , _lowercase :Any , _lowercase :Optional[int] , _lowercase :Tuple , _lowercase :int , _lowercase :int , ) -> str:
UpperCAmelCase_ = LlamaForCausalLM(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCAmelCase_ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __a ( self :Any , _lowercase :Optional[Any] , _lowercase :Dict , _lowercase :Optional[int] , _lowercase :Optional[Any] , _lowercase :int , _lowercase :Optional[Any] , _lowercase :List[Any] , _lowercase :Dict , _lowercase :Dict , ) -> List[Any]:
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = LlamaForCausalLM(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
# first forward pass
UpperCAmelCase_ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ , )
UpperCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append the new tokens to input_ids and the attention mask
UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1)
UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1)
UpperCAmelCase_ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )['''hidden_states'''][0]
UpperCAmelCase_ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )['''hidden_states'''][0]
# select random slice
UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3))
def __a ( self :Dict) -> List[Any]:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a_ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : Optional[Any] =(LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
UpperCamelCase__ : int =(LlamaForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ : Union[str, Any] =(
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Any =False
UpperCamelCase__ : int =False
def __a ( self :Dict) -> List[str]:
UpperCAmelCase_ = LlamaModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37)
def __a ( self :Union[str, Any]) -> Optional[int]:
self.config_tester.run_common_tests()
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_)
def __a ( self :int) -> Tuple:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def __a ( self :Any) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = input_dict['''input_ids''']
UpperCAmelCase_ = input_ids.ne(1).to(lowerCamelCase_)
UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
UpperCAmelCase_ = LlamaForSequenceClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCAmelCase_ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def __a ( self :Tuple) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = '''single_label_classification'''
UpperCAmelCase_ = input_dict['''input_ids''']
UpperCAmelCase_ = input_ids.ne(1).to(lowerCamelCase_)
UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
UpperCAmelCase_ = LlamaForSequenceClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCAmelCase_ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def __a ( self :Any) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = '''multi_label_classification'''
UpperCAmelCase_ = input_dict['''input_ids''']
UpperCAmelCase_ = input_ids.ne(1).to(lowerCamelCase_)
UpperCAmelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
UpperCAmelCase_ = LlamaForSequenceClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
UpperCAmelCase_ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''')
def __a ( self :Optional[int]) -> Any:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)])
def __a ( self :List[str] , _lowercase :Dict) -> str:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = ids_tensor([1, 10] , config.vocab_size)
UpperCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ = LlamaModel(lowerCamelCase_)
original_model.to(lowerCamelCase_)
original_model.eval()
UpperCAmelCase_ = original_model(lowerCamelCase_).last_hidden_state
UpperCAmelCase_ = original_model(lowerCamelCase_).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ = {'''type''': scaling_type, '''factor''': 10.0}
UpperCAmelCase_ = LlamaModel(lowerCamelCase_)
scaled_model.to(lowerCamelCase_)
scaled_model.eval()
UpperCAmelCase_ = scaled_model(lowerCamelCase_).last_hidden_state
UpperCAmelCase_ = scaled_model(lowerCamelCase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5))
else:
self.assertFalse(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5))
@require_torch
class a_ ( unittest.TestCase ):
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''')
@slow
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCAmelCase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''')
UpperCAmelCase_ = model(torch.tensor([input_ids]))
# Expected mean on dim = -1
UpperCAmelCase_ = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]])
torch.testing.assert_close(out.mean(-1) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5)
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''')
@slow
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCAmelCase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''')
UpperCAmelCase_ = model(torch.tensor(lowerCamelCase_))
# Expected mean on dim = -1
UpperCAmelCase_ = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]])
torch.testing.assert_close(out.mean(-1) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5)
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''')
@slow
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCAmelCase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''')
UpperCAmelCase_ = model(torch.tensor(lowerCamelCase_))
# Expected mean on dim = -1
UpperCAmelCase_ = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]])
torch.testing.assert_close(out.mean(-1) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5)
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''')
@slow
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCAmelCase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''')
UpperCAmelCase_ = model(torch.tensor(lowerCamelCase_))
UpperCAmelCase_ = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa)
torch.testing.assert_close(out.mean(-1) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2)
# fmt: off
UpperCAmelCase_ = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5)
@unittest.skip('''Model is currently gated''')
@slow
def __a ( self :Optional[int]) -> Tuple:
UpperCAmelCase_ = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi'''
UpperCAmelCase_ = '''Simply put, the theory of relativity states that '''
UpperCAmelCase_ = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''')
UpperCAmelCase_ = tokenizer.encode(lowerCamelCase_ , return_tensors='''pt''')
UpperCAmelCase_ = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=lowerCamelCase_)
# greedy generation outputs
UpperCAmelCase_ = model.generate(lowerCamelCase_ , max_new_tokens=64 , top_p=lowerCamelCase_ , temperature=1 , do_sample=lowerCamelCase_)
UpperCAmelCase_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
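# Background sketch for the RoPE scaling test above (an assumption about the
# mechanism, not code from this file): linear scaling divides position ids by the
# factor, so with factor 10.0 position 100 is embedded like original position 10.
def linear_scaled_positions(seq_len, factor):
    import torch
    return torch.arange(seq_len, dtype=torch.float32) / factor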
| 361 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,)
UpperCamelCase__ : Tuple =(("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
def __a ( self :Union[str, Any] , time_step :List[Any]=0 , **config :Optional[int]) -> List[Any]:
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop('''num_inference_steps''' , None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output , new_output = sample, sample
for t in range(time_step , time_step + scheduler.config.solver_order + 1):
output = scheduler.step(residual , t , output , **kwargs).prev_sample
new_output = new_scheduler.step(residual , t , new_output , **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Optional[Any] , time_step :str=0 , **config :Union[str, Any]) -> Dict:
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop('''num_inference_steps''' , None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual , time_step , sample , **kwargs).prev_sample
new_output = new_scheduler.step(residual , time_step , sample , **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Dict , scheduler :Union[str, Any]=None , **config :List[Any]) -> int:
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample , t)
sample = scheduler.step(residual , t , sample).prev_sample
return sample
def __a ( self :int) -> Tuple:
scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
num_inference_steps = 50
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:]):
residual = model(sample , t)
sample = scheduler.step(residual , t , sample).prev_sample
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
assert sample.dtype == torch.floataa
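# Hedged sketch (an addition) mirroring the config round-trip exercised above: any
# of the imported solvers can be rebuilt from another's config, which is what lets
# diffusion pipelines swap schedulers freely.
def _scheduler_swap_sketch():
    base = DPMSolverSinglestepScheduler()
    swapped = UniPCMultistepScheduler.from_config(base.config)
    assert swapped.config.num_train_timesteps == base.config.num_train_timesteps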
| 344 | 0 |
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))
    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
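# Sanity sketch (an addition): level = 0 yields
# factor = (259 * 255) / (255 * 259) = 1.0, so the mapping is the identity.
assert (259 * (0 + 255)) / (255 * (259 - 0)) == 1.0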
| 362 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def forward( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
def pre_forward( self :Tuple , module :Optional[int] , *args :Union[str, Any] , **kwargs :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
def post_forward( self :Union[str, Any] , module :Dict , output :Tuple) -> int:
return output + 1
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model that already has one replaces it; hooks do not chain automatically
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# Attaching a hook to a model that already has one replaces it; hooks do not chain automatically
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
test_model._hf_hook.no_grad = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
hook_kwargs = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
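# Condensed sketch (an addition, summarizing the offload tests above rather than
# adding coverage): offload=True parks parameters on the "meta" device, each
# forward streams them to the execution device, and removing the hooks restores
# the real weights on CPU.
def _offload_lifecycle_sketch():
    model = ModelForTest()
    attach_align_device_hook(model, execution_device="cpu", offload=True)
    assert model.batchnorm.weight.device == torch.device("meta")
    remove_hook_from_submodules(model)
    assert model.batchnorm.weight.device == torch.device("cpu")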
| 344 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class a_ ( a__ ):
UpperCamelCase__ : int ="""timesformer"""
def __init__( self :List[Any] , _lowercase :Dict=224 , _lowercase :Any=16 , _lowercase :Union[str, Any]=3 , _lowercase :Union[str, Any]=8 , _lowercase :Union[str, Any]=768 , _lowercase :int=12 , _lowercase :Optional[Any]=12 , _lowercase :Tuple=3072 , _lowercase :Union[str, Any]="gelu" , _lowercase :Optional[Any]=0.0 , _lowercase :List[str]=0.0 , _lowercase :Tuple=0.02 , _lowercase :Optional[Any]=1E-6 , _lowercase :List[Any]=True , _lowercase :str="divided_space_time" , _lowercase :Optional[Any]=0 , **_lowercase :Optional[int] , ) -> int:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = num_frames
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = attention_type
UpperCAmelCase_ = drop_path_rate
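# Worked numbers for the defaults above (an added sketch): 224 // 16 = 14 patches
# per side, 14 * 14 = 196 patches per frame, and 196 * 8 = 1568 patch tokens
# before the CLS token is prepended for the divided space-time attention.
assert (224 // 16) ** 2 * 8 == 1568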
| 363 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 344 | 0 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError('''Parameter number must be int''')
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''')
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 100_0000) -> int:
    '''simple docstring'''
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('''Parameters chain_length and number_limit must be int''')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains the exact amount of elements, increase the counter.
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f"{solution()}")
| 364 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="levit"
def __init__( self :List[str] , _lowercase :List[Any]=224 , _lowercase :str=3 , _lowercase :Optional[int]=3 , _lowercase :str=2 , _lowercase :List[Any]=1 , _lowercase :str=16 , _lowercase :Dict=[128, 256, 384] , _lowercase :Union[str, Any]=[4, 8, 12] , _lowercase :Tuple=[4, 4, 4] , _lowercase :Dict=[16, 16, 16] , _lowercase :Any=0 , _lowercase :Dict=[2, 2, 2] , _lowercase :Any=[2, 2, 2] , _lowercase :Tuple=0.02 , **_lowercase :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] =version.parse("1.11" )
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def __a ( self :List[Any]) -> float:
return 1E-4
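# Worked numbers for the defaults above (an added sketch): the four stride-2
# convolutions of LeViT's patch embedding shrink each 224-pixel side by
# 2**4 = 16 — the patch_size — leaving a 14 x 14 = 196-token sequence at the
# first hidden size of 128.
assert 224 // 2**4 == 14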
| 344 | 0 |
def solution(limit: int = 100_0000) -> int:
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
    print(f"{solution() = }")
| 365 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ) -> None:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase_ = True
elif "IPython" in sys.modules:
UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cell initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
UpperCAmelCase_ = 8
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='''127.0.0.1''' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='''MULTI_GPU''' )
                print(f"Launching training on {num_processes} GPUs." )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                # enables CPU fallback for ops that have no MPS kernel
                os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
        function(*args )
def A ( function , args=() , num_processes=2 ) -> Optional[Any]:
    '''simple docstring'''
    from torch.multiprocessing import start_processes
    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
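# --- Added sketch (not part of the original file): a minimal re-implementation of
# --- the `patch_environment` pattern both launchers above rely on - temporarily
# --- exporting torch.distributed rendezvous variables and restoring the previous
# --- values afterwards. Illustrative only, not accelerate's actual implementation.
from contextlib import contextmanager

@contextmanager
def _patch_environment_sketch(**kwargs ):
    saved = {}
    for key, value in kwargs.items():
        key = key.upper()
        saved[key] = os.environ.get(key )
        os.environ[key] = str(value )
    try:
        yield
    finally:
        # restore or remove every variable we touched
        for key, old in saved.items():
            if old is None:
                os.environ.pop(key , None )
            else:
                os.environ[key] = old

# usage: the variables only exist inside the block
with _patch_environment_sketch(world_size=2 , master_addr='''127.0.0.1''' , master_port='''29500''' ):
    assert os.environ['''WORLD_SIZE'''] == '''2'''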
| 344 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class a_ ( ModelMixin ):
    def __init__( self , controlnets ) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def __a ( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels=None , timestep_cond=None , attention_mask=None , cross_attention_kwargs=None , guess_mode=False , return_dict=True , ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def __a ( self , save_directory , is_main_process=True , save_function=None , safe_serialization=False , variant=None , ) -> str:
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
@classmethod
    def __a ( cls , pretrained_model_path , **kwargs ) -> int:
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets )} controlnets loaded from {pretrained_model_path}." )
        if len(controlnets ) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}." )
        return cls(controlnets )
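# --- Added sketch (not part of the original file): the residual-merging pattern
# --- used by the forward pass above, demonstrated on plain tensors. Shapes and
# --- names here are illustrative assumptions, not the diffusers API.
def _merge_residuals_sketch(per_net_outputs ):
    # per_net_outputs: list of (down_samples: list[Tensor], mid_sample: Tensor),
    # one entry per controlnet; the first net seeds the accumulators, later nets add in
    down_acc , mid_acc = None, None
    for i, (down_samples, mid_sample) in enumerate(per_net_outputs ):
        if i == 0:
            down_acc , mid_acc = list(down_samples ), mid_sample.clone()
        else:
            down_acc = [prev + curr for prev, curr in zip(down_acc , down_samples )]
            mid_acc += mid_sample
    return down_acc, mid_acc

# usage: two fake nets, each emitting two "down" residuals and one "mid" residual
_outs = [([torch.ones(1 , 4 ), torch.ones(1 , 4 )], torch.ones(1 , 8 )) for _ in range(2 )]
_down , _mid = _merge_residuals_sketch(_outs )
assert torch.equal(_mid , 2 * torch.ones(1 , 8 ) )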
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( TestCasePlus ):
    def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ) -> int:
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MARIAN_MODEL , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''')).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['''eval_bleu'''] , float)
            assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def __a ( self :Any) -> int:
        self.run_seqaseq_quick(distributed=False)
    @require_torch_multi_gpu
    def __a ( self :int) -> Any:
        self.run_seqaseq_quick(distributed=True)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=False)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
        self.run_seqaseq_quick(
            distributed=True , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=False)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
    def __a ( self , experiment_id ) -> List[str]:
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
        data = experiments[experiment_id]
        kwargs = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
        log_info_string = '''Running training'''
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data['''extra_args_str'''])
        n_matches = len(re.findall(log_info_string , cl.err))
        self.assertEqual(n_matches , data['''n_matches'''])
@slow
    def __a ( self :Any) -> Dict:
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''')).log_history
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''] , float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str ) -> Tuple[int, int, float]:
            extra_args = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''')).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
            loss = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            loss_orig , loss_bnb , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
    def run_trainer( self , max_len: int , model_name: str , num_train_epochs: int , learning_rate: float = 3E-3 , optim: str = "adafactor" , distributed: bool = False , extra_args_str: str = None , eval_steps: int = 0 , predict_with_generate: bool = True , do_train: bool = True , do_eval: bool = True , do_predict: bool = True , n_gpus_to_use: int = None , ) -> str:
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
        args_eval = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps)}\n ".split()
        args_predict = '''
    --do_predict
    '''.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd , env=self.get_env())
        else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs):
                main()
        return output_dir
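# --- Added sketch (not part of the original file): the argv-patching trick used by
# --- run_trainer's non-distributed branch, reduced to a self-contained example.
# --- `_fake_main` is hypothetical and stands in for run_translation.main().
def _fake_main() -> str:
    # real example scripts parse sys.argv exactly like this
    return sys.argv[1]

with patch.object(sys , '''argv''' , ['''run_translation.py''', '''--do_train''']):
    assert _fake_main() == '''--do_train'''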
| 344 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 367 |
import functools
def A ( word1 , word2 ) -> int:
    '''simple docstring'''
    len_word1 = len(word1 )
    len_word2 = len(word2 )
    @functools.cache
    def min_distance(index1 , index2 ) -> int:
        # if first word index overflows - delete all remaining chars of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining chars of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2] )  # current letters not identical
        return min(
            1 + min_distance(index1 + 1 , index2 ) , 1 + min_distance(index1 , index2 + 1 ) , diff + min_distance(index1 + 1 , index2 + 1 ) , )
    return min_distance(0 , 0 )
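# --- Added demo (not part of the original file): the classic Levenshtein example.
# --- "kitten" -> "sitting" needs 3 edits (k->s, e->i, insert trailing g), and an
# --- empty source needs one insert per target character.
assert A('''kitten''' , '''sitting''' ) == 3
assert A('''''' , '''abc''' ) == 3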
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 0 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def dataset() -> List[str]:
    '''simple docstring'''
    n = 10
    features = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
    dataset = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
            '''id''': list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope='''session''' )
def arrow_file(tmp_path_factory , dataset ) -> List[Any]:
    '''simple docstring'''
    filename = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
UpperCamelCase_ = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='''session''' )
def text_file(tmp_path_factory ) -> Union[str, Any]:
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
    data = FILE_CONTENT
    with open(filename , '''w''' ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope='''session''' )
def bz2_file(tmp_path_factory ) -> Optional[Any]:
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with bz2.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def gz_file(tmp_path_factory ) -> Optional[Any]:
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with gzip.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def lz4_file(tmp_path_factory ) -> Tuple:
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
        data = bytes(FILE_CONTENT , '''utf-8''' )
        with lz4.frame.open(path , '''wb''' ) as f:
            f.write(data )
        return path
@pytest.fixture(scope='''session''' )
def seven_zip_file(tmp_path_factory , text_file ) -> List[Any]:
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
        with py7zr.SevenZipFile(path , '''w''' ) as archive:
            archive.write(text_file , arcname=os.path.basename(text_file ) )
        return path
@pytest.fixture(scope='''session''' )
def tar_file(tmp_path_factory , text_file ) -> Tuple:
    '''simple docstring'''
    import tarfile
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='''session''' )
def xz_file(tmp_path_factory ) -> str:
    '''simple docstring'''
    import lzma
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with lzma.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def zip_file(tmp_path_factory , text_file ) -> int:
    '''simple docstring'''
    import zipfile
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='''session''' )
def zstd_file(tmp_path_factory ) -> List[str]:
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
        path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
        data = bytes(FILE_CONTENT , '''utf-8''' )
        with zstd.open(path , '''wb''' ) as f:
            f.write(data )
        return path
@pytest.fixture(scope='''session''' )
def xml_file(tmp_path_factory ) -> Tuple:
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
    data = textwrap.dedent(
'''\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='''session''' )
def dataset_dict() -> List[Any]:
    '''simple docstring'''
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def arrow_path(tmp_path_factory ) -> Dict:
    '''simple docstring'''
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS )
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
    dataset.map(cache_file_name=path )
    return path
@pytest.fixture(scope='''session''' )
def sqlite_path(tmp_path_factory ) -> Dict:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
        cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
        for item in DATA:
            cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
        con.commit()
    return path
@pytest.fixture(scope='''session''' )
def csv_path(tmp_path_factory ) -> Union[str, Any]:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
    with open(path , '''w''' , newline='''''' ) as f:
        writer = csv.DictWriter(f , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
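# --- Added sketch (not part of the original file): how a test would consume the
# --- session-scoped fixtures above. Pytest injects `csv_path` by parameter name;
# --- the test itself is illustrative and not part of the suite.
def test_csv_path_roundtrip_sketch(csv_path ):
    import csv as _csv
    with open(csv_path , newline='''''' ) as f:
        rows = list(_csv.DictReader(f ) )
    assert len(rows ) == 4  # DATA has 4 rows
    assert rows[0]['''col_1'''] == '''0'''  # DictReader yields strings for every column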
@pytest.fixture(scope='''session''' )
def csv2_path(tmp_path_factory ) -> Dict:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
    with open(path , '''w''' , newline='''''' ) as f:
        writer = csv.DictWriter(f , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope='''session''' )
def bz2_csv_path(csv_path , tmp_path_factory ) -> int:
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
    with open(csv_path , '''rb''' ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def zip_csv_path(csv_path , csv2_path , tmp_path_factory ) -> Any:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path ) )
        f.write(csv2_path , arcname=os.path.basename(csv2_path ) )
    return path
@pytest.fixture(scope='''session''' )
def zip_uppercase_csv_path(csv_path , csv2_path , tmp_path_factory ) -> str:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
        f.write(csv2_path , arcname=os.path.basename(csv2_path.replace('''.csv''' , '''.CSV''' ) ) )
    return path
@pytest.fixture(scope='''session''' )
def zip_csv_with_dir_path(csv_path , csv2_path , tmp_path_factory ) -> str:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(csv_path , arcname=os.path.join('''main_dir''' , os.path.basename(csv_path ) ) )
        f.write(csv2_path , arcname=os.path.join('''main_dir''' , os.path.basename(csv2_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def parquet_path(tmp_path_factory ) -> List[str]:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
    schema = pa.schema(
        {
            '''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
        } )
    with open(path , '''wb''' ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
    return path
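# --- Added sketch (not part of the original file): validating the parquet fixture
# --- by reading it back with pyarrow; illustrative only.
def test_parquet_path_schema_sketch(parquet_path ):
    table = pq.read_table(parquet_path )
    assert table.num_rows == 4
    assert table.schema.field('''col_2''').type == pa.int64()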
@pytest.fixture(scope='''session''' )
def json_list_of_dicts_path(tmp_path_factory ) -> int:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
    data = {'''data''': DATA}
    with open(path , '''w''' ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope='''session''' )
def json_dict_of_lists_path(tmp_path_factory ) -> Tuple:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
    data = {'''data''': DATA_DICT_OF_LISTS}
    with open(path , '''w''' ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope='''session''' )
def jsonl_path(tmp_path_factory ) -> str:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
    with open(path , '''w''' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def jsonl2_path(tmp_path_factory ) -> Optional[Any]:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
    with open(path , '''w''' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def jsonl_312_path(tmp_path_factory ) -> str:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
    with open(path , '''w''' ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def jsonl_str_path(tmp_path_factory ) -> Tuple:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
    with open(path , '''w''' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def text_gz_path(tmp_path_factory , text_path ) -> Dict:
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
    with open(text_path , '''rb''' ) as orig_file:
        with gzip.open(path , '''wb''' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope='''session''' )
def jsonl_gz_path(tmp_path_factory , jsonl_path ) -> Tuple:
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
    with open(jsonl_path , '''rb''' ) as orig_file:
        with gzip.open(path , '''wb''' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope='''session''' )
def zip_jsonl_path(jsonl_path , jsonl2_path , tmp_path_factory ) -> int:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.write(jsonl2_path , arcname=os.path.basename(jsonl2_path ) )
    return path
@pytest.fixture(scope='''session''' )
def zip_nested_jsonl_path(zip_jsonl_path , jsonl_path , jsonl2_path , tmp_path_factory ) -> List[str]:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(zip_jsonl_path , arcname=os.path.join('''nested''' , os.path.basename(zip_jsonl_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def zip_jsonl_with_dir_path(jsonl_path , jsonl2_path , tmp_path_factory ) -> str:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(jsonl_path , arcname=os.path.join('''main_dir''' , os.path.basename(jsonl_path ) ) )
        f.write(jsonl2_path , arcname=os.path.join('''main_dir''' , os.path.basename(jsonl2_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def tar_jsonl_path(jsonl_path , jsonl2_path , tmp_path_factory ) -> Union[str, Any]:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonl2_path , arcname=os.path.basename(jsonl2_path ) )
    return path
@pytest.fixture(scope='''session''' )
def tar_nested_jsonl_path(tar_jsonl_path , jsonl_path , jsonl2_path , tmp_path_factory ) -> int:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(tar_jsonl_path , arcname=os.path.join('''nested''' , os.path.basename(tar_jsonl_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def text_path(tmp_path_factory ) -> List[Any]:
    '''simple docstring'''
    data = ['''0''', '''1''', '''2''', '''3''']
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
    with open(path , '''w''' ) as f:
        for item in data:
            f.write(item + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def text2_path(tmp_path_factory ) -> Optional[int]:
    '''simple docstring'''
    data = ['''0''', '''1''', '''2''', '''3''']
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
    with open(path , '''w''' ) as f:
        for item in data:
            f.write(item + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def unsupported_ext_path(tmp_path_factory ) -> Union[str, Any]:
    '''simple docstring'''
    # fixture name reconstructed for illustration; nothing in this file requests it by name
    data = ['''0''', '''1''', '''2''', '''3''']
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
    with open(path , '''w''' ) as f:
        for item in data:
            f.write(item + '''\n''' )
    return path
@pytest.fixture(scope='''session''' )
def zip_text_path(text_path , text2_path , tmp_path_factory ) -> Dict:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(text2_path , arcname=os.path.basename(text2_path ) )
    return path
@pytest.fixture(scope='''session''' )
def zip_text_with_dir_path(text_path , text2_path , tmp_path_factory ) -> Dict:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(text_path , arcname=os.path.join('''main_dir''' , os.path.basename(text_path ) ) )
        f.write(text2_path , arcname=os.path.join('''main_dir''' , os.path.basename(text2_path ) ) )
    return path
@pytest.fixture(scope='''session''' )
def zip_unsupported_ext_path(text_path , text2_path , tmp_path_factory ) -> List[str]:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(text_path , arcname=os.path.basename('''unsupported.ext''' ) )
        f.write(text2_path , arcname=os.path.basename('''unsupported_2.ext''' ) )
    return path
@pytest.fixture(scope='''session''' )
def text_path_with_unicode_new_lines(tmp_path_factory ) -> Optional[int]:
    '''simple docstring'''
    text = '\n'.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(text )
    return path
@pytest.fixture(scope='''session''' )
def image_file() -> Tuple:
    '''simple docstring'''
    return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def audio_file() -> Dict:
    '''simple docstring'''
    return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def zip_image_path(image_file , tmp_path_factory ) -> Tuple:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace('''.jpg''' , '''2.jpg''' ) )
    return path
@pytest.fixture(scope='''session''' )
def data_dir_with_hidden_files(tmp_path_factory ) -> Any:
    '''simple docstring'''
    data_dir = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
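# --- Added sketch (not part of the original file): filtering out the hidden files
# --- and directories created by the fixture above, the way a data loader would.
def _visible_files_sketch(data_dir ):
    visible = []
    for root, dirs, files in os.walk(data_dir ):
        dirs[:] = [d for d in dirs if not d.startswith('''.''' )]  # prune hidden dirs in place
        visible += [f for f in files if not f.startswith('''.''' )]
    return sorted(visible )
# for the fixture above this keeps only subdir/train.txt and subdir/test.txt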
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class a_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token='''<s>''' , eos_token='''</s>''' , unk_token='''<unk>''' , sep_token='''<sep>''' , pad_token='''<pad>''' , cls_token='''<cls>''' , mask_token='''<mask>''' , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size( self ) -> int:
return len(self.sp_model)
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self ) -> Union[str, Any]:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text( self , inputs ) -> str:
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text , out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self , index ) -> str:
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE , ''' ''').strip()
        return out_string
    def _decode( self , token_ids: List[int] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , spaces_between_special_tokens: bool = True , **kwargs , ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('''use_source_tokenizer''' , False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''''''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
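# --- Added demo (not part of the original file): the detokenization rule used by
# --- convert_tokens_to_string above, on literal pieces. SentencePiece marks word
# --- boundaries with U+2581; replacing it with a space restores the surface text.
_SPIECE_UNDERLINE = '''\u2581'''
_pieces = ['''\u2581Hello''', ''',''', '''\u2581world''', '''!''']
assert ''''''.join(_pieces).replace(_SPIECE_UNDERLINE , ''' ''').strip() == '''Hello, world!'''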
| 344 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ) -> Any:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer( self , **kwargs ) -> Dict:
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_rust_tokenizer( self , **kwargs ) -> Union[str, Any]:
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer ) -> Optional[Any]:
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ) -> List[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map)
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
    def __a ( self :Union[str, Any]) -> int:
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=True) , [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=True) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
    def test_sequence_builders( self ) -> Optional[int]:
        tokenizer = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''')
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ) -> Dict:
        tokenizer = self.get_tokenizer()
        sequence = '''Encode this sequence.'''
        space_encoding = tokenizer.byte_encoder[''' '''.encode('''utf-8''')[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char , space_encoding)
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char , space_encoding)
        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''})
        encoded = tokenizer.encode(sequence , add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char , space_encoding)
        # Testing spaces after special tokens
        mask = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(mask , lstrip=True , rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = '''Encode <mask> sequence'''
        sequence_nospace = '''Encode <mask>sequence'''
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char , space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char , space_encoding)
def __a ( self :int) -> Dict:
pass
def __a ( self :Union[str, Any]) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
UpperCAmelCase_ = '''A, <mask> AllenNLP sentence.'''
UpperCAmelCase_ = tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids''']) , sum(tokens_p['''token_type_ids''']))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) , sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) , )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
def __a ( self :Union[str, Any]) -> Optional[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
UpperCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCamelCase__)
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCamelCase__)
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCamelCase__)
def __a ( self :List[Any]) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
UpperCAmelCase_ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_ = f"{text_of_1_token} {text_of_1_token}"
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__) + 1, len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__) + 1, len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__), len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__), len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__) + 1, 1 + len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__), 1 + len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__)
UpperCAmelCase_ = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__), 1 + len(UpperCamelCase__) + 1 + len(UpperCamelCase__)) , )
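
# Note on the expected offsets above: `trim_offsets` controls whether the
# leading space folded into a BPE token ("Gtoken") counts toward its
# (start, end) span, which is why the expected start indices shift by one
# between the trim_offsets=True and trim_offsets=False cases, and
# `add_prefix_space` applies the same leading-space treatment to the first
# token of the text.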
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( BackboneConfigMixin , PretrainedConfig ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , _lowercase :Optional[int]=224 , _lowercase :List[str]=4 , _lowercase :Tuple=3 , _lowercase :List[Any]=96 , _lowercase :Any=[2, 2, 6, 2] , _lowercase :int=[3, 6, 12, 24] , _lowercase :List[Any]=7 , _lowercase :Dict=4.0 , _lowercase :Any=True , _lowercase :int=0.0 , _lowercase :List[Any]=0.0 , _lowercase :Tuple=0.1 , _lowercase :str="gelu" , _lowercase :Union[str, Any]=False , _lowercase :Tuple=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[str]=None , _lowercase :Any=None , **_lowercase :Any , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_lowercase) - 1))
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_lowercase) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
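
# For the defaults above (embed_dim=96, depths=[2, 2, 6, 2] -> 4 stages), the
# derived hidden_size is 96 * 2 ** 3 = 768 and stage_names comes out as
# ["stem", "stage1", "stage2", "stage3", "stage4"].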
| 344 | 0 |
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    '''Return True if `string` can be split into a concatenation of words taken from `words`.'''
    if not isinstance(string , str) or len(string) == 0:
        raise ValueError('''the string should be a non-empty string''')
    if not isinstance(words , list) or not all(
        isinstance(item , str) and len(item) > 0 for item in words):
        raise ValueError('''the words should be a list of non-empty strings''')
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = '''WORD_KEEPER'''
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string)
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string):
            trie_node = trie_node.get(string[i] , None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False) and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
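
# A minimal usage sketch (inputs are illustrative, not from the original file):
# the trie stops each scan at the first unmatched character and functools.cache
# memoizes is_breakable per start index, so the whole check is roughly
# O(len(string) * longest_word) rather than exponential.
assert word_break('''applepenapple''' , ['''apple''', '''pen'''])
assert not word_break('''catsandog''' , ['''cats''', '''dog''', '''sand''', '''and''', '''cat'''])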
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def A ( __UpperCAmelCase ) -> Any:
    '''Factory used by the argparse sub-command to instantiate the train command defined below.'''
    return a_(__UpperCAmelCase )
class a_ ( BaseTransformersCLICommand ):
@staticmethod
def __a ( _lowercase :ArgumentParser) -> List[Any]:
UpperCAmelCase_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
train_parser.add_argument(
'''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''')
train_parser.add_argument(
'''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''')
train_parser.add_argument(
'''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''')
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''')
train_parser.add_argument(
'''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to saved the trained model.''')
train_parser.add_argument(
'''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''')
train_parser.add_argument(
'''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''')
train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''')
train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''')
train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''')
train_parser.set_defaults(func=_lowercase)
def __init__( self :Union[str, Any] , _lowercase :Namespace) -> Union[str, Any]:
UpperCAmelCase_ = logging.get_logger('''transformers-cli/training''')
UpperCAmelCase_ = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_lowercase)
UpperCAmelCase_ = args.output
UpperCAmelCase_ = args.column_label
UpperCAmelCase_ = args.column_text
UpperCAmelCase_ = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
UpperCAmelCase_ = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = args.validation_split
UpperCAmelCase_ = args.train_batch_size
UpperCAmelCase_ = args.valid_batch_size
UpperCAmelCase_ = args.learning_rate
UpperCAmelCase_ = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
| 344 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( DiffusionPipeline ):
"""simple docstring"""
def __init__( self :Dict , _lowercase :Union[str, Any] , _lowercase :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :str , _lowercase :str , _lowercase :Any , _lowercase :Tuple , ) -> str:
super().__init__()
self.register_modules(
vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , )
def __a ( self :Dict , _lowercase :int = "auto") -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowerCAmelCase)
def __a ( self :Tuple) -> Any:
self.enable_attention_slicing(__lowerCAmelCase)
@torch.no_grad()
def __call__( self :List[str] , _lowercase :Optional[Any] , _lowercase :Union[str, Any] = 512 , _lowercase :str = 512 , _lowercase :Optional[Any] = 50 , _lowercase :List[Any] = 7.5 , _lowercase :List[str] = None , _lowercase :List[str] = 1 , _lowercase :List[Any] = 0.0 , _lowercase :Optional[Any] = None , _lowercase :Any = None , _lowercase :List[Any] = "pil" , _lowercase :int = True , _lowercase :Optional[int] = None , _lowercase :Union[str, Any] = 1 , _lowercase :Union[str, Any] = None , **_lowercase :Any , ) -> Union[str, Any]:
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
UpperCAmelCase_ = 1
elif isinstance(__lowerCAmelCase , __lowerCAmelCase):
UpperCAmelCase_ = len(__lowerCAmelCase)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__lowerCAmelCase)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__lowerCAmelCase)}.")
# get prompt text embeddings
UpperCAmelCase_ = self.tokenizer(
__lowerCAmelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCAmelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f" {self.tokenizer.model_max_length} tokens: {removed_text}")
UpperCAmelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase_ = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = text_embeddings.shape
UpperCAmelCase_ = text_embeddings.repeat(1 , __lowerCAmelCase , 1)
UpperCAmelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , __lowerCAmelCase , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_ = 42
if negative_prompt is None:
UpperCAmelCase_ = ['''''']
elif type(__lowerCAmelCase) is not type(__lowerCAmelCase):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__lowerCAmelCase)} !="
f" {type(__lowerCAmelCase)}.")
elif isinstance(__lowerCAmelCase , __lowerCAmelCase):
UpperCAmelCase_ = [negative_prompt]
elif batch_size != len(__lowerCAmelCase):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__lowerCAmelCase)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''')
else:
UpperCAmelCase_ = negative_prompt
UpperCAmelCase_ = text_input_ids.shape[-1]
UpperCAmelCase_ = self.tokenizer(
__lowerCAmelCase , padding='''max_length''' , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' , )
UpperCAmelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ = uncond_embeddings.shape[1]
UpperCAmelCase_ = uncond_embeddings.repeat(__lowerCAmelCase , __lowerCAmelCase , 1)
UpperCAmelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , __lowerCAmelCase , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase_ = torch.randn(
__lowerCAmelCase , generator=__lowerCAmelCase , device='''cpu''' , dtype=__lowerCAmelCase).to(self.device)
UpperCAmelCase_ = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device='''cpu''' , dtype=__lowerCAmelCase).to(
self.device)
else:
UpperCAmelCase_ = torch.randn(
__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=__lowerCAmelCase)
UpperCAmelCase_ = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=__lowerCAmelCase)
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
UpperCAmelCase_ = latents_reference.to(self.device)
UpperCAmelCase_ = latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase_ = 0 if dx < 0 else dx
UpperCAmelCase_ = 0 if dy < 0 else dy
UpperCAmelCase_ = max(-dx , 0)
UpperCAmelCase_ = max(-dy , 0)
UpperCAmelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__lowerCAmelCase)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase_ = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
UpperCAmelCase_ = {}
if accepts_eta:
UpperCAmelCase_ = eta
for i, t in enumerate(self.progress_bar(__lowerCAmelCase)):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
UpperCAmelCase_ = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase)
# predict the noise residual
UpperCAmelCase_ = self.unet(__lowerCAmelCase , __lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ = noise_pred.chunk(2)
UpperCAmelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
UpperCAmelCase_ = 1 / 0.18_215 * latents
UpperCAmelCase_ = self.vae.decode(__lowerCAmelCase).sample
UpperCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase_ = self.feature_extractor(self.numpy_to_pil(__lowerCAmelCase) , return_tensors='''pt''').to(
self.device)
UpperCAmelCase_ , UpperCAmelCase_ = self.safety_checker(
images=__lowerCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
else:
UpperCAmelCase_ = None
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(__lowerCAmelCase)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__lowerCAmelCase , nsfw_content_detected=__lowerCAmelCase)
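
# Aside: a minimal sketch of the classifier-free-guidance update applied at each
# denoising step above (variable names here are illustrative, not the class's):
#
#     noise_uncond, noise_text = noise_pred.chunk(2)
#     noise_pred = noise_uncond + guidance_scale * (noise_text - noise_uncond)
#
# guidance_scale == 1.0 disables guidance; larger values push the sample toward
# the prompt at some cost in diversity.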
| 371 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
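
# Worked example of the sizing rule above (numbers are illustrative): with
# size={"shortest_edge": 288}, size_divisor=32 and a 400x640 input, the short
# side scales to 288 (so 640 -> 460.8, under the 479 max_size cap), and both
# sides are then floored to multiples of 32, giving a 288x448 output.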
@require_torch
@require_vision
class a_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 0 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    '''Convert a snake_case string to camelCase, or to PascalCase when use_pascal is True.'''
    if not isinstance(input_str , str):
        msg = f"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split('''_''' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 350 |
def solution(limit: int = 100_0000) -> int:
    '''Totient sieve: returns the sum of phi(n) for 2 <= n <= limit, i.e. the
    number of reduced proper fractions with denominator <= limit.'''
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:  # i is prime: no smaller prime has adjusted it yet
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
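
# Hand check of the sieve above with a small limit: phi(2..8) = 1, 2, 2, 4, 2, 6, 4,
# so there are 21 reduced proper fractions with denominator <= 8.
assert solution(8) == 21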
if __name__ == "__main__":
print(solution())
| 344 | 0 |
def solution(min_total: int = 10**12) -> int:
    '''Project Euler 100: number of blue discs in the first arrangement whose
    total exceeds min_total and where drawing two blue discs has probability 1/2.'''
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
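
# Sanity check against the known Project Euler 100 example: the first
# arrangement with more than 21 discs in total and P(blue, blue) = 1/2 is
# 85 blue out of 120; each loop iteration jumps to the next solution of the
# Pell-like equation 2 * b * (b - 1) = n * (n - 1), so only O(log(min_total))
# iterations are needed.
assert solution(21) == 85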
if __name__ == "__main__":
print(f"{solution() = }")
| 351 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( SchedulerCommonTest ):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
        # an earlier version of set_timesteps() caused an error indexing the alphas when num_inference_steps was a power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
| 344 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    '''Build the (timm key, HF key) rename table for a DeiT checkpoint.'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''Split each fused timm qkv projection into the separate HF query/key/value tensors.'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = '''deit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
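
# (For reference: the fused timm qkv weight has shape (3 * hidden_size,
# hidden_size); the three equal slices taken above become the separate
# query/key/value projections that the HF attention module expects.)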
def rename_key(dct, old, new):
    '''Move dct[old] to dct[new].'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''Download the standard COCO cats image used to verify the conversion.'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    '''Copy/paste/tweak a timm DeiT checkpoint into the Hugging Face DeiT structure.'''
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith('''tiny''' ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('''small''' ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('''base''' ):
        pass
    elif deit_name[4:].startswith('''large''' ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 352 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class a_ :
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
elif isinstance(_lowercase , _lowercase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
                    # converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
                    # if PCM bytes were already provided, use them directly instead of re-reading the file
                    UpperCAmelCase_ = np.frombuffer(value['''bytes'''] , dtype=np.int16).astype(np.float32) / 32767
                else:
                    UpperCAmelCase_ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.float32) / 32767
UpperCAmelCase_ = BytesIO(bytes())
sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
UpperCAmelCase_ , UpperCAmelCase_ = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
UpperCAmelCase_ = xsplitext(_lowercase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(_lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(_lowercase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate)
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
UpperCAmelCase_ = pa.array([Audio().encode_example(_lowercase) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
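        # Inline each referenced audio file as raw bytes so the Arrow table becomes self-contained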
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(_lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
| 344 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase_ = None
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
UpperCamelCase_ = {
"""facebook/mbart-large-en-ro""": 1_024,
"""facebook/mbart-large-cc25""": 1_024,
}
# fmt: off
UpperCamelCase_ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Optional[Any] =["input_ids", "attention_mask"]
UpperCamelCase__ : Any =MBartTokenizer
UpperCamelCase__ : List[int] =[]
UpperCamelCase__ : List[int] =[]
def __init__( self :Union[str, Any] , _lowercase :Optional[Any]=None , _lowercase :Dict=None , _lowercase :int="<s>" , _lowercase :Optional[Any]="</s>" , _lowercase :List[Any]="</s>" , _lowercase :Tuple="<s>" , _lowercase :Dict="<unk>" , _lowercase :List[Any]="<pad>" , _lowercase :Optional[int]="<mask>" , _lowercase :List[str]=None , _lowercase :str=None , _lowercase :int=None , **_lowercase :List[str] , ) -> Optional[Any]:
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , src_lang=_lowercase , tgt_lang=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = False if not self.vocab_file else True
UpperCAmelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens})
UpperCAmelCase_ = {
lang_code: self.convert_tokens_to_ids(_lowercase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase_ = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase_ = self.convert_tokens_to_ids(self._src_lang)
UpperCAmelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def __a ( self :Union[str, Any]) -> str:
return self._src_lang
@src_lang.setter
def __a ( self :Tuple , _lowercase :str) -> None:
UpperCAmelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __a ( self :Any , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __a ( self :List[Any] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __a ( self :Tuple , _lowercase :int , _lowercase :str , _lowercase :Optional[str] , _lowercase :Optional[str] , **_lowercase :List[Any]) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = self(_lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , **_lowercase)
UpperCAmelCase_ = self.convert_tokens_to_ids(_lowercase)
UpperCAmelCase_ = tgt_lang_id
return inputs
def __a ( self :int , _lowercase :List[str] , _lowercase :str = "en_XX" , _lowercase :Optional[List[str]] = None , _lowercase :str = "ro_RO" , **_lowercase :Any , ) -> BatchEncoding:
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = tgt_lang
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase)
def __a ( self :int) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def __a ( self :Dict) -> Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def __a ( self :Union[str, Any] , _lowercase :Optional[int]) -> None:
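        # Reset the special tokens to the source-lang setting: no prefix, suffix = [eos, src_lang_code]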
UpperCAmelCase_ = self.convert_tokens_to_ids(_lowercase)
UpperCAmelCase_ = []
UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ = self.convert_ids_to_tokens(self.prefix_tokens)
UpperCAmelCase_ = self.convert_ids_to_tokens(self.suffix_tokens)
UpperCAmelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def __a ( self :str , _lowercase :str) -> None:
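        # Reset the special tokens to the target-lang setting: no prefix, suffix = [eos, tgt_lang_code]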
UpperCAmelCase_ = self.convert_tokens_to_ids(_lowercase)
UpperCAmelCase_ = []
UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ = self.convert_ids_to_tokens(self.prefix_tokens)
UpperCAmelCase_ = self.convert_ids_to_tokens(self.suffix_tokens)
UpperCAmelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def __a ( self :Optional[int] , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase):
copyfile(self.vocab_file , _lowercase)
        return (out_vocab_file,)
| 353 |
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=_snake_case ):
UpperCamelCase__ : Any =["torch", "scipy"]
def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]:
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["PoolFormerFeatureExtractor"]
UpperCamelCase_ = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 354 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def A ( __UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
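    # Build per-residue-type lookup tables mapping between the compact atom14 and full atom37 layouts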
for rt in rc.restypes:
UpperCAmelCase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCAmelCase_ = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.floataa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCAmelCase_ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCAmelCase_ = rc.restype_atoa[restype_letter]
UpperCAmelCase_ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCAmelCase_ = rc.atom_order[atom_name]
UpperCAmelCase_ = 1
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
return protein
def A ( __UpperCAmelCase ) -> Dict[str, np.ndarray]:
'''simple docstring'''
UpperCAmelCase_ = tree_map(lambda __UpperCAmelCase : torch.tensor(__UpperCAmelCase , device=batch['''aatype'''].device ) , __UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = tensor_tree_map(lambda __UpperCAmelCase : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) )
return out
| 344 | 0 |
import argparse
from collections import defaultdict
import yaml
UpperCamelCase_ = "docs/source/en/_toctree.yml"
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = defaultdict(__UpperCAmelCase )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
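    # Count how often each "local" target appears and set any "Overview" page aside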
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(__UpperCAmelCase )
UpperCAmelCase_ = new_doc_list
UpperCAmelCase_ = [key for key, value in counts.items() if value > 1]
UpperCAmelCase_ = []
for duplicate_key in duplicates:
UpperCAmelCase_ = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(__UpperCAmelCase ) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
    UpperCAmelCase_ = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : __UpperCAmelCase["title"].lower() )
# "overview" gets special treatment and is always first
if len(__UpperCAmelCase ) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(__UpperCAmelCase )
# Sort
return overview_doc
def A ( __UpperCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
with open(__UpperCAmelCase , encoding='''utf-8''' ) as f:
UpperCAmelCase_ = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase_ = content[api_idx]['''sections''']
# Then to the model doc
UpperCAmelCase_ = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
UpperCAmelCase_ = api_doc[scheduler_idx]['''sections''']
UpperCAmelCase_ = clean_doc_toc(__UpperCAmelCase )
UpperCAmelCase_ = False
if new_scheduler_doc != scheduler_doc:
UpperCAmelCase_ = True
if overwrite:
UpperCAmelCase_ = new_scheduler_doc
if diff:
if overwrite:
UpperCAmelCase_ = api_doc
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(__UpperCAmelCase , allow_unicode=__UpperCAmelCase ) )
else:
            raise ValueError(
                '''The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def A ( __UpperCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
with open(__UpperCAmelCase , encoding='''utf-8''' ) as f:
UpperCAmelCase_ = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase_ = content[api_idx]['''sections''']
# Then to the model doc
UpperCAmelCase_ = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
UpperCAmelCase_ = False
UpperCAmelCase_ = api_doc[pipeline_idx]['''sections''']
UpperCAmelCase_ = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
UpperCAmelCase_ = pipeline_doc['''section''']
UpperCAmelCase_ = clean_doc_toc(__UpperCAmelCase )
if overwrite:
UpperCAmelCase_ = new_sub_pipeline_doc
new_pipeline_docs.append(__UpperCAmelCase )
# sort overall pipeline doc
UpperCAmelCase_ = clean_doc_toc(__UpperCAmelCase )
if new_pipeline_docs != pipeline_docs:
UpperCAmelCase_ = True
if overwrite:
UpperCAmelCase_ = new_pipeline_docs
if diff:
if overwrite:
UpperCAmelCase_ = api_doc
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(__UpperCAmelCase , allow_unicode=__UpperCAmelCase ) )
else:
            raise ValueError(
                '''The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCamelCase_ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 355 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="openai/whisper-base"
UpperCamelCase__ : int =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
UpperCamelCase__ : Any ="transcriber"
UpperCamelCase__ : Optional[int] =WhisperProcessor
UpperCamelCase__ : List[str] =WhisperForConditionalGeneration
UpperCamelCase__ : List[Any] =["audio"]
UpperCamelCase__ : Union[str, Any] =["text"]
def __a ( self :int , _lowercase :Any) -> Tuple:
return self.pre_processor(_lowercase , return_tensors='''pt''').input_features
def __a ( self :Dict , _lowercase :Tuple) -> Any:
return self.model.generate(inputs=_lowercase)
def __a ( self :int , _lowercase :Union[str, Any]) -> Optional[Any]:
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase)[0]
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 | 0 |
import re
import subprocess
import sys
UpperCamelCase_ = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
UpperCamelCase_ = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
UpperCamelCase_ = "|".join(sys.argv[1:])
UpperCamelCase_ = re.compile(rf"^({joined_dirs}).*?\.py$")
UpperCamelCase_ = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 357 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : str =BigBirdTokenizer
UpperCamelCase__ : Tuple =BigBirdTokenizerFast
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : List[str] =True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __a ( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 344 | 0 |
from PIL import Image
def A ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ = image.size
    UpperCAmelCase_ = 0
    UpperCAmelCase_ = image.load()
    # First pass: accumulate the mean intensity over every pixel of the greyscale image
    for i in range(width ):
        for j in range(height ):
            UpperCAmelCase_ = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: binarize in place, pixels above the mean become 255 and the rest 0
    for j in range(width ):
        for i in range(height ):
            UpperCAmelCase_ = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
UpperCamelCase_ = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 358 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase_ = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase_ = {ord(char) for char in VALID_CHARS}
UpperCamelCase_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> str | None:
'''simple docstring'''
UpperCAmelCase_ = ""
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
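    # XOR each cipher byte with the cycling 3-letter key; bail out if a decoded char falls outside the allowed set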
for keychar, cipherchar in zip(cycle(__UpperCAmelCase ) , __UpperCAmelCase ):
UpperCAmelCase_ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCAmelCase )
return decoded
def A ( __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
UpperCAmelCase_ = []
for key in product(__UpperCAmelCase , repeat=3 ):
UpperCAmelCase_ = try_key(__UpperCAmelCase , __UpperCAmelCase )
if encoded is not None:
possibles.append(__UpperCAmelCase )
return possibles
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def A ( __UpperCAmelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding='''utf-8''' )
UpperCAmelCase_ = [int(__UpperCAmelCase ) for number in data.strip().split(''',''' )]
UpperCAmelCase_ = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase_ = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
UpperCAmelCase_ = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
| 344 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
UpperCamelCase_ = parser.parse_args()
    UpperCamelCase_ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
UpperCamelCase_ = CLIPImageProcessor()
UpperCamelCase_ = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
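    # Reuse the txt2img pipeline's components, adding a CLIP image encoder for image conditioning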
UpperCamelCase_ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 359 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def A ( ) -> str:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def A ( ) -> Any:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = dataset_loading_script_name
UpperCAmelCase_ = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__UpperCAmelCase )
UpperCAmelCase_ = script_dir / f"{script_name}.py"
with open(__UpperCAmelCase , '''w''' ) as f:
f.write(__UpperCAmelCase )
return str(__UpperCAmelCase )
| 344 | 0 |
def A ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
return str(__UpperCAmelCase ) == str(__UpperCAmelCase )[::-1]
def A ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return int(__UpperCAmelCase ) + int(str(__UpperCAmelCase )[::-1] )
def A ( __UpperCAmelCase = 1_0000 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = []
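    # Reverse-and-add up to 50 times; numbers that never reach a palindrome are counted as Lychrel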
for num in range(1 , __UpperCAmelCase ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = num
while iterations < 50:
UpperCAmelCase_ = sum_reverse(__UpperCAmelCase )
iterations += 1
if is_palindrome(__UpperCAmelCase ):
break
else:
lychrel_nums.append(__UpperCAmelCase )
return len(__UpperCAmelCase )
if __name__ == "__main__":
print(f"{solution() = }")
| 360 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="open-llama"
def __init__( self :Union[str, Any] , _lowercase :List[Any]=100000 , _lowercase :Dict=4096 , _lowercase :List[Any]=11008 , _lowercase :Optional[int]=32 , _lowercase :Union[str, Any]=32 , _lowercase :List[str]="silu" , _lowercase :Union[str, Any]=2048 , _lowercase :Any=0.02 , _lowercase :Optional[Any]=1E-6 , _lowercase :str=True , _lowercase :str=0 , _lowercase :Any=1 , _lowercase :Optional[Any]=2 , _lowercase :str=False , _lowercase :Dict=True , _lowercase :Optional[Any]=0.1 , _lowercase :Tuple=0.1 , _lowercase :Dict=True , _lowercase :List[Any]=True , _lowercase :Dict=None , **_lowercase :Optional[int] , ) -> List[Any]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = kwargs.pop(
'''use_memorry_efficient_attention''' , _lowercase)
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_dropout_prob
UpperCAmelCase_ = use_stable_embedding
UpperCAmelCase_ = shared_input_output_embedding
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase , )
def __a ( self :int) -> str:
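        # Validate the optional `rope_scaling` dict: it must carry a known type and a scaling factor > 1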
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowercase) or len(self.rope_scaling) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}")
UpperCAmelCase_ = self.rope_scaling.get('''type''' , _lowercase)
UpperCAmelCase_ = self.rope_scaling.get('''factor''' , _lowercase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(_lowercase , _lowercase) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,)
UpperCamelCase__ : Tuple =(("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
def __a ( self :Union[str, Any] , _lowercase :List[Any]=0 , **_lowercase :Optional[int]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Optional[Any] , _lowercase :str=0 , **_lowercase :Union[str, Any]) -> Dict:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Dict , _lowercase :Union[str, Any]=None , **_lowercase :List[Any]) -> int:
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
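        # Run the full denoising loop: predict the residual and step the scheduler at each timestep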
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
assert sample.dtype == torch.floataa
| 344 | 0 |
import requests
from bs4 import BeautifulSoup
def A ( __UpperCAmelCase = "AAPL" ) -> str:
'''simple docstring'''
UpperCAmelCase_ = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
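    # Download and parse the quote page; the price lives in a span carrying a Yahoo-specific CSS class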
    UpperCAmelCase_ = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
UpperCAmelCase_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 362 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
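    # Pre-forward hook: increments the first positional input by 1 before the wrapped forward runs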
def __a ( self :Tuple , _lowercase :Optional[int] , *_lowercase :Union[str, Any] , **_lowercase :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
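    # Post-forward hook: adds 1 to the module's output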
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
return output + 1
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
@require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))

        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))

        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict())

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))

        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 344 | 0 |
import requests
UpperCamelCase_ = "" # <-- Put your OpenWeatherMap appid here!
UpperCamelCase_ = "https://api.openweathermap.org/data/2.5/"
def A ( __UpperCAmelCase = "Chicago" , __UpperCAmelCase = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''weather''' , params=locals() ).json()
def A ( __UpperCAmelCase = "Kolkata, India" , __UpperCAmelCase = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''forecast''' , params=locals() ).json()
def A ( __UpperCAmelCase = 55.68 , __UpperCAmelCase = 12.57 , __UpperCAmelCase = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''onecall''' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 363 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepare a list of random PIL images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, GPT2Tokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
| 344 | 0 |
def longest_distance(graph):
    """Print the length (in vertices) of the longest path in a DAG, using Kahn's topological order."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
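# Example: in the adjacency list above the longest path is 0 -> 2 -> 5 -> 6 -> 7,
# which visits 5 vertices, so longest_distance(graph) prints 5.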
| 364 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 344 | 0 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
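# Example: the first Fibonacci number with 3 digits is 144, at index 12, so
# solution(3) returns 12; solution() answers the Project Euler 25 question of
# the first index at which the sequence reaches 1000 digits.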
| 365 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function from a notebook, on TPU, one or several GPUs, MPS or CPU."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.")
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args)
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function using several processes on CPU, for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.0.1", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
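# Usage sketch (the names below are illustrative, not part of this module):
#
#     def training_function(config):
#         ...  # build the model, create an Accelerator, run the training loop
#
#     notebook_launcher(training_function, args=(config,), num_processes=2)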
| 344 | 0 |
def min_path_sum(grid: list) -> int:
    """Find the cheapest top-left to bottom-right path cost, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
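# Example: min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) returns 7
# (the cheapest path is 1 -> 3 -> 1 -> 1 -> 1).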
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MARIAN_MODEL, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()
@require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)
@require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False)
@require_apex
@require_torch_gpu
    def test_run_seq2seq_apex(self):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}

        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB", )
        self.assertGreater(
            gpu_total_mem_diff, expected_savings, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB", )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
    def run_trainer(self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3e-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None, ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps)}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        ".split()

        args_eval = f"\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps)}\n        ".split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein edit distance via memoized top-down recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2), 1 + min_distance(index1, index2 + 1), diff + min_distance(index1 + 1, index2 + 1), )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
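# Example: min_distance_up_bottom("intention", "execution") returns 5.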
| 344 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 344 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
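
# Integration test: run the pretrained RegNet classifier on a COCO sample image and
# compare a slice of the output logits against reference values.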
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4))
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
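
# Configuration for the Swin backbone variant used inside MaskFormer; the
# hyper-parameters below mirror the standard Swin Transformer (patch embedding,
# per-stage depths and heads, attention window size).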
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( BackboneConfigMixin , PretrainedConfig ):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self ,
        image_size=224 ,
        patch_size=4 ,
        num_channels=3 ,
        embed_dim=96 ,
        depths=[2, 2, 6, 2] ,
        num_heads=[3, 6, 12, 24] ,
        window_size=7 ,
        mlp_ratio=4.0 ,
        qkv_bias=True ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        drop_path_rate=0.1 ,
        hidden_act="gelu" ,
        use_absolute_embeddings=False ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-5 ,
        out_features=None ,
        out_indices=None ,
        **kwargs ,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
| 344 | 0 |
import sys
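
# Classic matrix-chain multiplication DP (CLRS 15.2): for a dimensions array p,
# matrix[a][b] holds the minimum number of scalar multiplications needed to compute
# the product A_a ... A_b, and sol[a][b] stores the split index that achieves it.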
def matrix_chain_order(array):
    '''Return the cost table and the split table for the matrix chain.'''
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2 , n):
        for a in range(1 , n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a , b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution , i , j):
    '''Print the optimal parenthesization recorded in optimal_solution.'''
    if i == j:
        print('''A''' + str(i) , end=''' ''')
    else:
        print('''(''' , end=''' ''')
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j])
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j)
        print(''')''' , end=''' ''')


def main():
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array)

    print('''No. of Operation required: ''' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution , 1 , n - 1)
if __name__ == "__main__":
main()
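    # For the CLRS example above this prints 15125 operations and the
    # parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).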
| 370 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
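
# `transformers-cli train` registers the sub-command below; TrainCommand loads a
# text-classification pipeline plus a CSV dataset and dispatches training to whichever
# framework (TensorFlow or PyTorch) is installed.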
def train_command_factory(args: Namespace):
    '''simple docstring'''
    return TrainCommand(args)
class TrainCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
        train_parser.add_argument(
            '''--train_data''' , type=str , required=True , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
        train_parser.add_argument(
            '''--column_label''' , type=int , default=0 , help='''Column of the dataset csv file with example labels.''')
        train_parser.add_argument(
            '''--column_text''' , type=int , default=1 , help='''Column of the dataset csv file with example texts.''')
        train_parser.add_argument(
            '''--column_id''' , type=int , default=2 , help='''Column of the dataset csv file with example ids.''')
        train_parser.add_argument(
            '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
        train_parser.add_argument('''--validation_data''' , type=str , default='''''' , help='''path to validation dataset.''')
        train_parser.add_argument(
            '''--validation_split''' , type=float , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
        train_parser.add_argument('''--output''' , type=str , default='''./''' , help='''path to saved the trained model.''')
        train_parser.add_argument(
            '''--task''' , type=str , default='''text_classification''' , help='''Task to train the model on.''')
        train_parser.add_argument(
            '''--model''' , type=str , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
        train_parser.add_argument('''--train_batch_size''' , type=int , default=32 , help='''Batch size for training.''')
        train_parser.add_argument('''--valid_batch_size''' , type=int , default=64 , help='''Batch size for validation.''')
        train_parser.add_argument('''--learning_rate''' , type=float , default=3E-5 , help='''Learning rate.''')
        train_parser.add_argument('''--adam_epsilon''' , type=float , default=1E-08 , help='''Epsilon for Adam optimizer.''')
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self , args: Namespace):
        self.logger = logging.get_logger('''transformers-cli/training''')

        self.framework = '''tf''' if is_tf_available() else '''torch'''

        os.makedirs(args.output , exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 344 | 0 |
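# Sum-of-digits sequence (this looks like Project Euler problem 551): a(1) = 1 and
# a(n) = a(n - 1) + digitsum(a(n - 1)); solution() returns a(10**15). Digits are kept
# little-endian in a list, and jumps over runs of terms are memoized per digitsum.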
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i , k , i , n):
    '''Advance the digit array a_i toward term n, reusing memoized jumps keyed on digitsum(b) and c.'''
    # a_i is split as a(i) = b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k , len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i) , k)))

    diff , dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1 , -1 , -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff , dn , _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i))):
                    new_c , a_i[j] = divmod(new_c , 10)
                if new_c > 0:
                    add(a_i , k , new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff , terms_jumped = next_term(a_i , k - 1 , i + dn , n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff , terms_jumped = compute(a_i , k , i + dn , n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k))
    return (diff, dn)


def compute(a_i , k , i , n):
    '''Compute terms one by one until a carry reaches digit k or term n is hit.'''
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b , ds_c , diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend , a_i[j] = divmod(s , 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i , k , addend)
    return diff, i - start_i


def add(digits , k , addend):
    '''Add `addend` into the little-endian digit array starting at digit index k.'''
    for j in range(k , len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient , digits[j] = divmod(s , 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend , digit = divmod(addend , 10)
        digits.append(digit)


def solution(n = 10**15):
    '''Return a(n) for the sequence a(1) = 1, a(i+1) = a(i) + digitsum(a(i)).'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff , terms_jumped = next_term(digits , 20 , i + dn , n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"{solution() = }")
| 371 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase ):
    def __init__(
        self ,
        parent ,
        do_resize: bool = True ,
        size: Dict[str, int] = None ,
        size_divisor: int = 32 ,
        do_rescale: bool = True ,
        rescale_factor: Union[int, float] = 1 / 255 ,
        do_normalize: bool = True ,
        do_center_crop: bool = True ,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] ,
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] ,
        do_pad: bool = True ,
        batch_size=7 ,
        min_resolution=30 ,
        max_resolution=400 ,
        num_channels=3 ,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {'''shortest_edge''': 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
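    # Expected sizes mirror BridgeTower's resizing: scale the shorter side to
    # `shortest_edge`, cap the longer side at 1333/800 * shortest_edge, then round
    # both sides down to a multiple of `size_divisor`.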
    def get_expected_values(self , image_inputs , batched=False):
        if not batched:
            size = self.size['''shortest_edge''']
            image = image_inputs[0]
            if isinstance(image , Image.Image):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            scale = size / min(w , h)
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh , neww) > max_size:
                scale = max_size / max(newh , neww)
                newh = newh * scale
                neww = neww * scale

            newh , neww = int(newh + 0.5), int(neww + 0.5)
            expected_height , expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values , key=lambda item: item[0])[0]
            expected_width = max(expected_values , key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class a_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , '''image_mean'''))
        self.assertTrue(hasattr(image_processing , '''image_std'''))
        self.assertTrue(hasattr(image_processing , '''do_normalize'''))
        self.assertTrue(hasattr(image_processing , '''do_resize'''))
        self.assertTrue(hasattr(image_processing , '''size'''))
        self.assertTrue(hasattr(image_processing , '''size_divisor'''))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values

        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''').pixel_values

        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values

        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''').pixel_values

        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values

        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''').pixel_values

        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 344 | 0 |
from sklearn.metrics import recall_score
import datasets
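
# Thin wrapper exposing sklearn.metrics.recall_score as a `datasets` metric:
# recall = TP / (TP + FN).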
UpperCamelCase_ = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
UpperCamelCase_ = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
UpperCamelCase_ = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''')),
'''references''': datasets.Sequence(datasets.Value('''int32''')),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32'''),
'''references''': datasets.Value('''int32'''),
}) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score) if score.size == 1 else score}
| 350 |
def solution(limit = 1_000_000):
    '''simple docstring'''
    # sieve of Euler's totient: phi[p] == p - 1 exactly when p is prime
    phi = [i - 1 for i in range(limit + 1 )]

    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1] )
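
# Summing Euler's totient over 2..limit counts the reduced proper fractions with
# denominator <= limit (Project Euler 72); for limit = 10**6 this is 303963552391.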
if __name__ == "__main__":
print(solution())
| 344 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num , den):
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len):
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int('''1''' + '''0''' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"{num}/{den}" )
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n = 2):
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
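
# The four non-trivial "digit cancelling" fractions are 16/64, 19/95, 26/65 and 49/98;
# their product reduces to 1/100, so solution() returns 100 (Project Euler 33).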
if __name__ == "__main__":
print(solution())
| 351 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( SchedulerCommonTest ):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
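
    # PNDM combines Runge-Kutta warm-up steps (step_prk) with linear multistep steps
    # (step_plms); the helpers below exercise both code paths.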
    def get_scheduler_config(self , **kwargs):
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self , time_step=0 , **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual , time_step , sample , **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual , time_step , sample , **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self , time_step=0 , **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)  # assumed: forward kwargs override the defaults, as in the common scheduler tests
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual , time_step , sample , **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual , time_step , sample , **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self , **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample , t)
            sample = scheduler.step_prk(residual , t , sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample , t)
            sample = scheduler.step_plms(residual , t , sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps'''):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps'''):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual , 0 , sample , **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual , 1 , sample , **kwargs).prev_sample

            self.assertEqual(output_0.shape , sample.shape)
            self.assertEqual(output_0.shape , output_1.shape)

            output_0 = scheduler.step_plms(residual , 0 , sample , **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual , 1 , sample , **kwargs).prev_sample

            self.assertEqual(output_0.shape , sample.shape)
            self.assertEqual(output_0.shape , output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual , t , sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1E-2
        assert abs(result_mean.item() - 0.2580) < 1E-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='''v_prediction''')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1E-2
        assert abs(result_mean.item() - 0.0878) < 1E-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1E-2
        assert abs(result_mean.item() - 0.2995) < 1E-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1E-2
        assert abs(result_mean.item() - 0.2434) < 1E-3
| 344 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
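
# The helpers below implement "community pipelines": a Python module is fetched from
# the Hub or GitHub, its relative imports are copied into HF_MODULES_CACHE, and the
# pipeline class is then imported from that cached copy.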
def get_diffusers_versions():
    '''simple docstring'''
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url).read())['''releases'''].keys()
    return sorted(releases , key=lambda x: version.Version(x))
def init_hf_modules():
    '''simple docstring'''
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE , exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    '''simple docstring'''
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path , exist_ok=True)
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    '''simple docstring'''
    with open(module_file , '''r''' , encoding='''utf-8''') as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , content , flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , content , flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    '''simple docstring'''
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    '''simple docstring'''
    with open(filename , '''r''' , encoding='''utf-8''') as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r'''^\s*import\s+(\S+)\s*$''' , content , flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , content , flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('''.''')[0] for imp in imports if not imp.startswith('''.''')]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" )

    return get_relative_imports(filename)
def get_class_in_module(class_name , module_path):
    '''simple docstring'''
    module_path = module_path.replace(os.path.sep , '''.''')
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module , class_name)
def find_pipeline_class(loaded_module):
    '''simple docstring'''
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline)
            and cls.__module__.split('''.''')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}." )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path , module_file , cache_dir=None , force_download=False , resume_download=False , proxies=None , use_auth_token=None , revision=None , local_files_only=False , ):
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path , module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''')[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'] )}." )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=False , )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''')))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file , submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path , module_needed) , submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path , revision=revision , token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file , submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path , f"{module_needed}.py" , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return os.path.join(full_submodule , module_file)
def get_class_from_dynamic_module(pretrained_model_name_or_path , module_file , class_name=None , cache_dir=None , force_download=False , resume_download=False , proxies=None , use_auth_token=None , revision=None , local_files_only=False , **kwargs , ):
    '''simple docstring'''
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace('''.py''' , ''''''))
| 352 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
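
# An Audio feature stores examples in Arrow as {"bytes", "path"} structs and decodes
# them on access to {"array", "path", "sampling_rate"} using soundfile/librosa.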
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type: str = field(default="Audio" , init=False , repr=False )

    def __call__(self):
        return self.pa_type
    def encode_example(self , value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
        if isinstance(value , str):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('''pcm'''):
                # "PCM" only has raw audio bytes
                if value.get('''sampling_rate''') is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
                if value.get('''bytes'''):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value['''bytes'''] , dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer , bytes_value , value['''sampling_rate'''] , format='''wav''')
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('''path''')}
        elif value.get('''bytes''') is not None or value.get('''path''') is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example(self , value: dict , token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')

        path , file = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split('''::''')[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL)['''repo_id''']
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path , '''rb''' , use_auth_token=use_auth_token) as f:
                array , sampling_rate = sf.read(f)
        else:
            array , sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
    def cast_storage(self , storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('''bytes''') >= 0:
                bytes_array = storage.field('''bytes''')
            else:
                bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            if storage.type.get_field_index('''path''') >= 0:
                path_array = storage.field('''path''')
            else:
                path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
        return array_cast(storage , self.pa_type)
    def embed_storage(self , storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path , '''rb''') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
| 344 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
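
# debug_launcher runs a test function in multiple CPU processes, emulating a
# distributed launch without requiring GPUs.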
@require_cpu
class a_ ( unittest.TestCase ):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=DummyObject ):
    _backends = ["torch", "scipy"]

    def __init__(self , *args , **kwargs):
        requires_backends(self , ['''torch''', '''scipy'''])

    @classmethod
    def from_config(cls , *args , **kwargs):
        requires_backends(cls , ['''torch''', '''scipy'''])

    @classmethod
    def from_pretrained(cls , *args , **kwargs):
        requires_backends(cls , ['''torch''', '''scipy'''])
| 344 | 0 |
import math
def perfect_square(num):
    '''simple docstring'''
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n):
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
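
# Example: both checks return True for 9 and False for 8; the binary-search variant
# avoids floating point by searching for an exact integer square root.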
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
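
# atom14 is a compact per-residue atom layout (at most 14 heavy atoms per residue);
# these helpers build index and mask tensors mapping between the dense atom14 layout
# and the full 37-atom-type layout used elsewhere.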
def A ( __UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for rt in rc.restypes:
UpperCAmelCase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCAmelCase_ = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.floataa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCAmelCase_ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCAmelCase_ = rc.restype_atoa[restype_letter]
UpperCAmelCase_ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCAmelCase_ = rc.atom_order[atom_name]
UpperCAmelCase_ = 1
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
return protein
def A ( __UpperCAmelCase ) -> Dict[str, np.ndarray]:
'''simple docstring'''
UpperCAmelCase_ = tree_map(lambda __UpperCAmelCase : torch.tensor(__UpperCAmelCase , device=batch['''aatype'''].device ) , __UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = tensor_tree_map(lambda __UpperCAmelCase : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) )
return out
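if __name__ == "__main__":
    # Toy sketch of the (residue, atom14) -> atom37 gather trick used above:
    # index a per-restype lookup table with the per-residue type ids, yielding
    # one index row per residue. Shapes and values are illustrative only.
    table = torch.tensor([[0, 5, 6], [0, 1, 2]])  # [num_restypes, atoms_per_residue]
    aatype = torch.tensor([1, 0, 1])              # one restype id per residue
    per_residue = table[aatype]                   # fancy indexing -> [num_res, atoms_per_residue]
    assert per_residue.shape == (3, 3) and per_residue[1].tolist() == [0, 5, 6]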
| 344 | 0 |
import os
from pathlib import Path
def A ( ) -> List[str]:
'''simple docstring'''
from torch.utils.cpp_extension import load
    UpperCAmelCase_ = Path(__file__).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
UpperCAmelCase_ = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root)] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 355 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="openai/whisper-base"
UpperCamelCase__ : int =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
UpperCamelCase__ : Any ="transcriber"
UpperCamelCase__ : Optional[int] =WhisperProcessor
UpperCamelCase__ : List[str] =WhisperForConditionalGeneration
UpperCamelCase__ : List[Any] =["audio"]
UpperCamelCase__ : Union[str, Any] =["text"]
def __a ( self :int , _lowercase :Any) -> Tuple:
return self.pre_processor(_lowercase , return_tensors='''pt''').input_features
def __a ( self :Dict , _lowercase :Tuple) -> Any:
return self.model.generate(inputs=_lowercase)
def __a ( self :int , _lowercase :Union[str, Any]) -> Optional[Any]:
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase)[0]
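    # Hypothetical usage sketch: PipelineTool subclasses are called end to end,
    # roughly `a_()(waveform)` -> transcribed string; the three methods above
    # are the encode / forward / decode stages of that call. Running it
    # downloads the openai/whisper-base checkpoint, so it is shown as a comment.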
| 344 | 0 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class a_ ( _snake_case ):
def __init__( self :int , _lowercase :Dict , _lowercase :List[str]=None , _lowercase :Any=True , _lowercase :Tuple=None , **_lowercase :List[Any]) -> Tuple:
UpperCAmelCase_ = parent
UpperCAmelCase_ = config_class
UpperCAmelCase_ = has_text_modality
UpperCAmelCase_ = kwargs
UpperCAmelCase_ = common_properties
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = self.config_class(**self.inputs_dict)
UpperCAmelCase_ = (
['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['''vocab_size'''])
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowercase , _lowercase) , msg=f"`{prop}` does not exist")
# Test that config has the common properties as setter
for idx, name in enumerate(_lowercase):
try:
setattr(_lowercase , _lowercase , _lowercase)
self.parent.assertEqual(
getattr(_lowercase , _lowercase) , _lowercase , msg=f"`{name} value {idx} expected, but was {getattr(_lowercase , _lowercase)}")
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowercase):
try:
UpperCAmelCase_ = self.config_class(**{name: idx})
self.parent.assertEqual(
getattr(_lowercase , _lowercase) , _lowercase , msg=f"`{name} value {idx} expected, but was {getattr(_lowercase , _lowercase)}")
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = self.config_class(**self.inputs_dict)
UpperCAmelCase_ = json.loads(config.to_json_string())
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowercase)
def __a ( self :Optional[Any]) -> Tuple:
UpperCAmelCase_ = self.config_class(**self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = os.path.join(_lowercase , '''config.json''')
config_first.to_json_file(_lowercase)
UpperCAmelCase_ = self.config_class.from_json_file(_lowercase)
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict())
def __a ( self :Dict) -> List[str]:
UpperCAmelCase_ = self.config_class(**self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowercase)
UpperCAmelCase_ = self.config_class.from_pretrained(_lowercase)
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict())
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = self.config_class(**self.inputs_dict)
UpperCAmelCase_ = '''test'''
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = os.path.join(_lowercase , _lowercase)
config_first.save_pretrained(_lowercase)
UpperCAmelCase_ = self.config_class.from_pretrained(_lowercase , subfolder=_lowercase)
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict())
def __a ( self :List[Any]) -> Any:
UpperCAmelCase_ = self.config_class(**self.inputs_dict , num_labels=5)
self.parent.assertEqual(len(config.idalabel) , 5)
self.parent.assertEqual(len(config.labelaid) , 5)
UpperCAmelCase_ = 3
self.parent.assertEqual(len(config.idalabel) , 3)
self.parent.assertEqual(len(config.labelaid) , 3)
def __a ( self :Optional[Any]) -> Any:
if self.config_class.is_composition:
return
UpperCAmelCase_ = self.config_class()
self.parent.assertIsNotNone(_lowercase)
def __a ( self :List[Any]) -> int:
UpperCAmelCase_ = copy.deepcopy(_lowercase)
UpperCAmelCase_ = self.config_class(**_lowercase)
UpperCAmelCase_ = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa))
elif getattr(_lowercase , _lowercase) != value:
wrong_values.append((key, getattr(_lowercase , _lowercase), value))
if len(_lowercase) > 0:
UpperCAmelCase_ = '''\n'''.join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
def __a ( self :Any) -> List[str]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
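    # Hypothetical usage sketch: upstream this class is ConfigTester, and a
    # model test suite typically wires it up as
    #
    #   tester = ConfigTester(self, config_class=SomeConfig, hidden_size=37)
    #   tester.run_common_tests()   # the role of the last method above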
| 356 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
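# Effect sketch: with _LazyModule, `import transformers.models.opt` stays
# cheap; the torch/tf/flax submodules are imported only when a name such as
# OPTModel is first accessed (the standard transformers lazy-import behavior).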
| 344 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a_ ( _snake_case ):
UpperCamelCase__ : List[str] ="git_vision_model"
def __init__( self :Optional[Any] , _lowercase :Optional[int]=768 , _lowercase :Any=3072 , _lowercase :Dict=12 , _lowercase :Optional[Any]=12 , _lowercase :int=3 , _lowercase :List[str]=224 , _lowercase :Dict=16 , _lowercase :List[str]="quick_gelu" , _lowercase :List[str]=1E-5 , _lowercase :Any=0.0 , _lowercase :Optional[int]=0.02 , **_lowercase :List[Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = hidden_act
@classmethod
def __a ( cls :Any , _lowercase :Union[str, os.PathLike] , **_lowercase :Optional[int]) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowercase)
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_lowercase , **_lowercase)
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''') == "git":
UpperCAmelCase_ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(_lowercase , **_lowercase)
class a_ ( _snake_case ):
UpperCamelCase__ : Any ="git"
def __init__( self :int , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=30522 , _lowercase :Union[str, Any]=768 , _lowercase :Union[str, Any]=6 , _lowercase :Dict=12 , _lowercase :Dict=3072 , _lowercase :Any="gelu" , _lowercase :Union[str, Any]=0.1 , _lowercase :Any=0.1 , _lowercase :Any=1024 , _lowercase :Union[str, Any]=0.02 , _lowercase :Any=1E-1_2 , _lowercase :Tuple=0 , _lowercase :Tuple="absolute" , _lowercase :Optional[int]=True , _lowercase :Dict=False , _lowercase :Tuple=101 , _lowercase :Optional[int]=102 , _lowercase :Optional[Any]=None , **_lowercase :Dict , ) -> Optional[Any]:
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , pad_token_id=_lowercase , **_lowercase)
if vision_config is None:
UpperCAmelCase_ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
UpperCAmelCase_ = GitVisionConfig(**_lowercase)
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = tie_word_embeddings
UpperCAmelCase_ = num_image_with_embedding
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
def __a ( self :Tuple) -> Tuple:
UpperCAmelCase_ = copy.deepcopy(self.__dict__)
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
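    # Hypothetical usage sketch (upstream names GitVisionConfig / GitConfig,
    # since the classes above are anonymized):
    #
    #   vision = GitVisionConfig(hidden_size=768)
    #   cfg = GitConfig(vision_config=vision.to_dict(), vocab_size=30522)
    #   assert cfg.to_dict()["vision_config"]["hidden_size"] == 768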
| 357 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : str =BigBirdTokenizer
UpperCamelCase__ : Tuple =BigBirdTokenizerFast
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : List[str] =True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __a ( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
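        # Hypothetical usage sketch (network access required): the slow tests
        # above hit the real checkpoint, e.g.
        #
        #   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        #   tok.encode("Hello World!")  # -> [65, 18536, 2260, 101, 66]
        #
        # where 65 / 66 are the [CLS] / [SEP] ids added around every sequence.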
| 344 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : int =StableDiffusionDiffEditPipeline
UpperCamelCase__ : Union[str, Any] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
UpperCamelCase__ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
UpperCamelCase__ : List[Any] =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase__ : int =frozenset([] )
def __a ( self :List[Any]) -> Union[str, Any]:
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
UpperCAmelCase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowercase , set_alpha_to_zero=_lowercase , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(_lowercase)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __a ( self :Tuple , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=0) -> Any:
UpperCAmelCase_ = floats_tensor((1, 16, 16) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_lowercase)).to(_lowercase)
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :str , _lowercase :Optional[int] , _lowercase :int=0) -> List[Any]:
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_lowercase)).convert('''RGB''')
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :Optional[int] , _lowercase :Optional[Any] , _lowercase :List[str]=0) -> Tuple:
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_lowercase)).convert('''RGB''')
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :str) -> Optional[int]:
if not hasattr(self.pipeline_class , '''_optional_components'''):
return
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = pipe(**_lowercase)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase)
UpperCAmelCase_ = self.pipeline_class.from_pretrained(_lowercase)
pipe_loaded.to(_lowercase)
pipe_loaded.set_progress_bar_config(disable=_lowercase)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase) is None , f"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = pipe_loaded(**_lowercase)[0]
UpperCAmelCase_ = np.abs(output - output_loaded).max()
self.assertLess(_lowercase , 1E-4)
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_mask_inputs(_lowercase)
UpperCAmelCase_ = pipe.generate_mask(**_lowercase)
UpperCAmelCase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
UpperCAmelCase_ = np.array([0] * 9)
UpperCAmelCase_ = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(_lowercase , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inversion_inputs(_lowercase)
UpperCAmelCase_ = pipe.invert(**_lowercase).images
UpperCAmelCase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
UpperCAmelCase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_lowercase , 1E-3)
def __a ( self :List[Any]) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def __a ( self :List[Any]) -> Union[str, Any]:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ = DPMSolverMultistepScheduler(**_lowercase)
UpperCAmelCase_ = DPMSolverMultistepInverseScheduler(**_lowercase)
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inversion_inputs(_lowercase)
UpperCAmelCase_ = pipe.invert(**_lowercase).images
UpperCAmelCase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
UpperCAmelCase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_lowercase , 1E-3)
@require_torch_gpu
@slow
class a_ ( unittest.TestCase ):
def __a ( self :Union[str, Any]) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __a ( cls :List[str]) -> Dict:
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''')
UpperCAmelCase_ = raw_image.convert('''RGB''').resize((768, 768))
UpperCAmelCase_ = raw_image
def __a ( self :str) -> int:
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowercase , torch_dtype=torch.floataa)
UpperCAmelCase_ = DDIMScheduler.from_config(pipe.scheduler.config)
UpperCAmelCase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = '''a bowl of fruit'''
UpperCAmelCase_ = '''a bowl of pears'''
UpperCAmelCase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowercase , target_prompt=_lowercase , generator=_lowercase , )
UpperCAmelCase_ = pipe.invert(
prompt=_lowercase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowercase).latents
UpperCAmelCase_ = pipe(
prompt=_lowercase , mask_image=_lowercase , image_latents=_lowercase , generator=_lowercase , negative_prompt=_lowercase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def __a ( self :Tuple) -> Any:
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowercase , torch_dtype=torch.floataa)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = '''a bowl of fruit'''
UpperCAmelCase_ = '''a bowl of pears'''
UpperCAmelCase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowercase , target_prompt=_lowercase , generator=_lowercase , )
UpperCAmelCase_ = pipe.invert(
prompt=_lowercase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowercase , num_inference_steps=25 , ).latents
UpperCAmelCase_ = pipe(
prompt=_lowercase , mask_image=_lowercase , image_latents=_lowercase , generator=_lowercase , negative_prompt=_lowercase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
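        # Flow sketch: DiffEdit edits in three stages, exactly as exercised above:
        #   mask    = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
        #   latents = pipe.invert(prompt=src, image=img).latents
        #   edited  = pipe(prompt=tgt, mask_image=mask, image_latents=latents).images[0]
        # Running it for real requires the stable-diffusion-2-1 weights on a GPU.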
| 358 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase_ = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase_ = {ord(char) for char in VALID_CHARS}
UpperCamelCase_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> str | None:
'''simple docstring'''
UpperCAmelCase_ = ""
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
for keychar, cipherchar in zip(cycle(__UpperCAmelCase ) , __UpperCAmelCase ):
UpperCAmelCase_ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCAmelCase )
return decoded
def A ( __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
UpperCAmelCase_ = []
for key in product(__UpperCAmelCase , repeat=3 ):
UpperCAmelCase_ = try_key(__UpperCAmelCase , __UpperCAmelCase )
if encoded is not None:
possibles.append(__UpperCAmelCase )
return possibles
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def A ( __UpperCAmelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding='''utf-8''' )
UpperCAmelCase_ = [int(__UpperCAmelCase ) for number in data.strip().split(''',''' )]
UpperCAmelCase_ = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase_ = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
UpperCAmelCase_ = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
| 344 | 0 |
class a_ :
def __init__( self :str) -> List[Any]:
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = {}
def __a ( self :Tuple , _lowercase :Tuple) -> Optional[int]:
if vertex not in self.adjacency:
UpperCAmelCase_ = {}
self.num_vertices += 1
def __a ( self :List[Any] , _lowercase :Optional[int] , _lowercase :List[str] , _lowercase :Union[str, Any]) -> Any:
self.add_vertex(_lowercase)
self.add_vertex(_lowercase)
if head == tail:
return
UpperCAmelCase_ = weight
UpperCAmelCase_ = weight
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_edges()
for edge in edges:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = edge
edges.remove((tail, head, weight))
for i in range(len(_lowercase)):
UpperCAmelCase_ = list(edges[i])
edges.sort(key=lambda _lowercase: e[2])
for i in range(len(_lowercase) - 1):
if edges[i][2] >= edges[i + 1][2]:
UpperCAmelCase_ = edges[i][2] + 1
for edge in edges:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = edge
UpperCAmelCase_ = weight
UpperCAmelCase_ = weight
def __str__( self :int) -> List[Any]:
UpperCAmelCase_ = ''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
UpperCAmelCase_ = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''')
def __a ( self :int) -> str:
UpperCAmelCase_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def __a ( self :Union[str, Any]) -> List[str]:
return self.adjacency.keys()
@staticmethod
def __a ( _lowercase :Union[str, Any]=None , _lowercase :int=None) -> str:
UpperCAmelCase_ = Graph()
if vertices is None:
UpperCAmelCase_ = []
if edges is None:
UpperCAmelCase_ = []
for vertex in vertices:
g.add_vertex(_lowercase)
for edge in edges:
g.add_edge(*_lowercase)
return g
class a_ :
def __init__( self :List[str]) -> Union[str, Any]:
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
def __len__( self :str) -> Union[str, Any]:
return len(self.parent)
def __a ( self :List[Any] , _lowercase :Dict) -> List[str]:
if item in self.parent:
return self.find(_lowercase)
UpperCAmelCase_ = item
UpperCAmelCase_ = 0
return item
def __a ( self :Optional[int] , _lowercase :Optional[int]) -> Tuple:
if item not in self.parent:
return self.make_set(_lowercase)
if item != self.parent[item]:
UpperCAmelCase_ = self.find(self.parent[item])
return self.parent[item]
def __a ( self :List[str] , _lowercase :Optional[int] , _lowercase :str) -> Dict:
UpperCAmelCase_ = self.find(_lowercase)
UpperCAmelCase_ = self.find(_lowercase)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
UpperCAmelCase_ = roota
return roota
if self.rank[roota] < self.rank[roota]:
UpperCAmelCase_ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
UpperCAmelCase_ = roota
return roota
return None
@staticmethod
def __a ( _lowercase :Union[str, Any]) -> int:
UpperCAmelCase_ = graph.num_vertices
UpperCAmelCase_ = Graph.UnionFind()
UpperCAmelCase_ = []
while num_components > 1:
UpperCAmelCase_ = {}
for vertex in graph.get_vertices():
UpperCAmelCase_ = -1
UpperCAmelCase_ = graph.get_edges()
for edge in edges:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = edge
edges.remove((tail, head, weight))
for edge in edges:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = edge
UpperCAmelCase_ = union_find.find(_lowercase)
UpperCAmelCase_ = union_find.find(_lowercase)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase_ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = cheap_edge[vertex]
if union_find.find(_lowercase) != union_find.find(_lowercase):
union_find.union(_lowercase , _lowercase)
mst_edges.append(cheap_edge[vertex])
UpperCAmelCase_ = num_components - 1
UpperCAmelCase_ = Graph.build(edges=_lowercase)
return mst
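    # Hypothetical usage sketch (upstream names: Graph and boruvka_mst, shown
    # because the anonymized method names above collide with each other):
    #
    #   g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 1)])
    #   mst = Graph.boruvka_mst(g)  # merge components via their cheapest edges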
| 359 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def A ( ) -> str:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def A ( ) -> Any:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = dataset_loading_script_name
UpperCAmelCase_ = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__UpperCAmelCase )
UpperCAmelCase_ = script_dir / f"{script_name}.py"
with open(__UpperCAmelCase , '''w''' ) as f:
f.write(__UpperCAmelCase )
return str(__UpperCAmelCase )
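# Hypothetical usage sketch: a test that receives the directory fixture can
# load the generated script directly, e.g.
#
#   def test_dummy_dataset(dataset_loading_script_dir):
#       builder = datasets.load_dataset_builder(dataset_loading_script_dir)
#
# (fixture names follow their roles; the anonymized fixtures above all read `A`).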
| 344 | 0 |
def A ( __UpperCAmelCase = 100_0000 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , __UpperCAmelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
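if __name__ == "__main__":
    # Live sanity sketch: the sieve fills phi[i] with Euler's totient, so the
    # function above (defined as `A`) over limit 8 counts the 21 reduced
    # proper fractions with denominator <= 8.
    assert A(8) == 21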
| 360 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="open-llama"
def __init__( self :Union[str, Any] , _lowercase :List[Any]=100000 , _lowercase :Dict=4096 , _lowercase :List[Any]=11008 , _lowercase :Optional[int]=32 , _lowercase :Union[str, Any]=32 , _lowercase :List[str]="silu" , _lowercase :Union[str, Any]=2048 , _lowercase :Any=0.02 , _lowercase :Optional[Any]=1E-6 , _lowercase :str=True , _lowercase :str=0 , _lowercase :Any=1 , _lowercase :Optional[Any]=2 , _lowercase :str=False , _lowercase :Dict=True , _lowercase :Optional[Any]=0.1 , _lowercase :Tuple=0.1 , _lowercase :Dict=True , _lowercase :List[Any]=True , _lowercase :Dict=None , **_lowercase :Optional[int] , ) -> List[Any]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = kwargs.pop(
'''use_memorry_efficient_attention''' , _lowercase)
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_dropout_prob
UpperCAmelCase_ = use_stable_embedding
UpperCAmelCase_ = shared_input_output_embedding
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase , )
def __a ( self :int) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowercase) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"got {self.rope_scaling}")
UpperCAmelCase_ = self.rope_scaling.get('''type''' , _lowercase)
UpperCAmelCase_ = self.rope_scaling.get('''factor''' , _lowercase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(_lowercase , _lowercase) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 344 | 0 |
import argparse
import os
import re
UpperCamelCase_ = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCamelCase_ = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
UpperCamelCase_ = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def A ( __UpperCAmelCase , __UpperCAmelCase = False ) -> List[str]:
'''simple docstring'''
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ = f.read()
UpperCAmelCase_ = content.split('''\n''' )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
while line_idx < len(__UpperCAmelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
UpperCAmelCase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
UpperCAmelCase_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
UpperCAmelCase_ = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
UpperCAmelCase_ = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : _re_identifier.search(__UpperCAmelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(__UpperCAmelCase ) )
elif "\n".join(__UpperCAmelCase ) != content:
return True
def A ( __UpperCAmelCase = False ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = [os.path.join(__UpperCAmelCase , __UpperCAmelCase ) for f in os.listdir(__UpperCAmelCase ) if f.endswith('''.py''' )]
UpperCAmelCase_ = [sort_auto_mapping(__UpperCAmelCase , overwrite=__UpperCAmelCase ) for fname in fnames]
if not overwrite and any(__UpperCAmelCase ):
UpperCAmelCase_ = [f for f, d in zip(__UpperCAmelCase , __UpperCAmelCase ) if d]
raise ValueError(
f"The following files have auto mappings that need sorting: {', '.join(__UpperCAmelCase )}. Run `make style` to fix"
''' this.''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
UpperCamelCase_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
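# Behavior sketch: for a mapping block such as
#
#   MODEL_MAPPING_NAMES = OrderedDict(
#       [
#           ("bert", "BertModel"),
#           ("albert", "AlbertModel"),
#       ]
#   )
#
# _re_intro_mapping matches the introduction line and _re_identifier extracts
# "bert" / "albert" as the sort keys, so entries end up alphabetized in place.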
| 361 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,)
UpperCamelCase__ : Tuple =(("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
def __a ( self :Union[str, Any] , _lowercase :List[Any]=0 , **_lowercase :Optional[int]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Optional[Any] , _lowercase :str=0 , **_lowercase :Union[str, Any]) -> Dict:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Dict , _lowercase :Union[str, Any]=None , **_lowercase :List[Any]) -> int:
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
assert sample.dtype == torch.floataa
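# Added sketch (not from the original test file): a minimal denoising loop with
# DPMSolverSinglestepScheduler, the API the tests above exercise. The zeros
# tensor standing in for a UNet output and the (1, 3, 8, 8) sample shape are
# illustrative assumptions only.
if __name__ == "__main__":
    import torch
    from diffusers import DPMSolverSinglestepScheduler

    demo_scheduler = DPMSolverSinglestepScheduler()
    demo_scheduler.set_timesteps(25)
    demo_sample = torch.randn(1, 3, 8, 8)
    for demo_t in demo_scheduler.timesteps:
        demo_residual = torch.zeros_like(demo_sample)  # stand-in for model(sample, t)
        demo_sample = demo_scheduler.step(demo_residual, demo_t, demo_sample).prev_sample
    print(demo_sample.shape)  # torch.Size([1, 3, 8, 8])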
| 344 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="openai/whisper-base"
UpperCamelCase__ : int =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
UpperCamelCase__ : Any ="transcriber"
UpperCamelCase__ : Optional[int] =WhisperProcessor
UpperCamelCase__ : List[str] =WhisperForConditionalGeneration
UpperCamelCase__ : List[Any] =["audio"]
UpperCamelCase__ : Union[str, Any] =["text"]
def __a ( self :int , _lowercase :Any) -> Tuple:
return self.pre_processor(_lowercase , return_tensors='''pt''').input_features
def __a ( self :Dict , _lowercase :Tuple) -> Any:
return self.model.generate(inputs=_lowercase)
def __a ( self :int , _lowercase :Union[str, Any]) -> Optional[Any]:
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase)[0]
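# Added sketch (not from the original file): hedged usage of the tool above,
# which upstream is transformers' SpeechToTextTool (renamed `a_` here), assuming
# the upstream attribute names behind the `UpperCamelCase__` placeholders are
# restored. The one-second silent 16 kHz array is a dummy assumption, and
# network access to download openai/whisper-base is required.
if __name__ == "__main__":
    import numpy as np

    demo_tool = a_()
    demo_audio = np.zeros(16_000, dtype=np.float32)  # dummy audio input
    print(demo_tool(demo_audio))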
| 362 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
def __a ( self :Tuple , _lowercase :Optional[int] , *_lowercase :Union[str, Any] , **_lowercase :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
return output + 1
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
        # Attaching a hook to a model that already has one replaces the old hook; it does not chain them
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
        # Attaching a hook to a model that already has one replaces the old hook; it does not chain them
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to different devices
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
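# Added sketch (not from the original test file): the hook lifecycle the tests
# above exercise, assuming the class definitions above with their upstream names
# (ModelForTest, PreForwardHook) restored and runnable. CPU-only.
if __name__ == "__main__":
    demo_model = ModelForTest()
    demo_x = torch.randn(2, 3)
    add_hook_to_module(demo_model, PreForwardHook())  # shifts the input by +1
    demo_hooked = demo_model(demo_x)
    remove_hook_from_module(demo_model)  # restores the plain forward
    assert torch.allclose(demo_hooked, demo_model(demo_x + 1), atol=1e-5)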
| 344 | 0 |
import re
def A ( __UpperCAmelCase ) -> list:
'''simple docstring'''
return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )]
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = split_input(str_ )
return "".join(
[''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
try:
UpperCAmelCase_ = split_input(__UpperCAmelCase )
if upper:
UpperCAmelCase_ = ''''''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
UpperCAmelCase_ = ''''''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
return to_simple_case(__UpperCAmelCase )
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
try:
UpperCAmelCase_ = to_simple_case(__UpperCAmelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
return to_complex_case(__UpperCAmelCase , __UpperCAmelCase , '''_''' )
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
return to_complex_case(__UpperCAmelCase , __UpperCAmelCase , '''-''' )
if __name__ == "__main__":
__import__("doctest").testmod()
| 363 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
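# Added sketch (not from the original test file): standalone composition of the
# processor the tests above exercise. Network access to the two tiny
# hf-internal-testing checkpoints is assumed; the 30x30 black image is a dummy.
if __name__ == "__main__":
    demo_processor = InstructBlipProcessor(
        BlipImageProcessor(),
        GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
        BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert"),
    )
    demo_inputs = demo_processor(text="lower newer", images=Image.new("RGB", (30, 30)))
    # expected keys: input_ids, attention_mask, qformer_input_ids,
    # qformer_attention_mask, pixel_values
    print(sorted(demo_inputs.keys()))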
| 344 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] ="blenderbot-small"
UpperCamelCase__ : Any =["past_key_values"]
UpperCamelCase__ : Optional[Any] ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self :Union[str, Any] , _lowercase :List[str]=50265 , _lowercase :Any=512 , _lowercase :str=8 , _lowercase :Union[str, Any]=2048 , _lowercase :Union[str, Any]=16 , _lowercase :Tuple=8 , _lowercase :Tuple=2048 , _lowercase :str=16 , _lowercase :Any=0.0 , _lowercase :Optional[int]=0.0 , _lowercase :Union[str, Any]=True , _lowercase :int=True , _lowercase :int="gelu" , _lowercase :List[Any]=512 , _lowercase :Tuple=0.1 , _lowercase :Dict=0.0 , _lowercase :Dict=0.0 , _lowercase :Optional[int]=0.02 , _lowercase :str=1 , _lowercase :Optional[int]=False , _lowercase :Optional[Any]=0 , _lowercase :Any=1 , _lowercase :str=2 , _lowercase :Any=2 , **_lowercase :Optional[int] , ) -> List[str]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = d_model
UpperCAmelCase_ = encoder_ffn_dim
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = encoder_attention_heads
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = init_std
UpperCAmelCase_ = encoder_layerdrop
UpperCAmelCase_ = decoder_layerdrop
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
class a_ ( _snake_case ):
@property
def __a ( self :Optional[int]) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
])
if self.use_past:
UpperCAmelCase_ = {0: '''batch'''}
UpperCAmelCase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction='''inputs''')
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
])
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(_lowercase):
UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
UpperCAmelCase_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
])
return common_inputs
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super().outputs
else:
UpperCAmelCase_ = super(_lowercase , self).outputs
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(_lowercase):
UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __a ( self :List[str] , _lowercase :PreTrainedTokenizer , _lowercase :int = -1 , _lowercase :int = -1 , _lowercase :bool = False , _lowercase :Optional[TensorType] = None , ) -> Mapping[str, Any]:
UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase)
# Generate decoder inputs
UpperCAmelCase_ = seq_length if not self.use_past else 1
UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase)
UpperCAmelCase_ = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase_ = dict(**_lowercase , **_lowercase)
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape
UpperCAmelCase_ = common_inputs['''decoder_input_ids'''].shape[1]
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = decoder_seq_length + 3
UpperCAmelCase_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase_ = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowercase , _lowercase)] , dim=1)
UpperCAmelCase_ = []
        # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ = min(_lowercase , _lowercase)
UpperCAmelCase_ = max(_lowercase , _lowercase) - min_num_layers
UpperCAmelCase_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowercase):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase),
torch.zeros(_lowercase),
torch.zeros(_lowercase),
torch.zeros(_lowercase),
))
# TODO: test this.
UpperCAmelCase_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowercase , _lowercase):
common_inputs["past_key_values"].append((torch.zeros(_lowercase), torch.zeros(_lowercase)))
return common_inputs
def __a ( self :List[Any] , _lowercase :PreTrainedTokenizer , _lowercase :int = -1 , _lowercase :int = -1 , _lowercase :bool = False , _lowercase :Optional[TensorType] = None , ) -> Mapping[str, Any]:
UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase)
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase_ = seqlen + 2
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = common_inputs['''attention_mask'''].dtype
UpperCAmelCase_ = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase)] , dim=1)
UpperCAmelCase_ = [
(torch.zeros(_lowercase), torch.zeros(_lowercase)) for _ in range(_lowercase)
]
return common_inputs
def __a ( self :Optional[int] , _lowercase :PreTrainedTokenizer , _lowercase :int = -1 , _lowercase :int = -1 , _lowercase :bool = False , _lowercase :Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = tokenizer.num_special_tokens_to_add(_lowercase)
UpperCAmelCase_ = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase)
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
UpperCAmelCase_ = dict(tokenizer(_lowercase , return_tensors=_lowercase))
return common_inputs
def __a ( self :Optional[Any] , _lowercase :PreTrainedTokenizer , _lowercase :int = -1 , _lowercase :int = -1 , _lowercase :bool = False , _lowercase :Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase)
elif self.task == "causal-lm":
UpperCAmelCase_ = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase)
else:
UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase)
return common_inputs
def __a ( self :Tuple , _lowercase :int , _lowercase :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Optional[Any]) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase)
else:
UpperCAmelCase_ = super(_lowercase , self)._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase)
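# Added note (not from the original file), hedged arithmetic for the causal-lm
# dummy inputs above: with the defaults (d_model=512, 16 attention heads) each
# past key/value tensor has head size 512 // 16 = 32 and sequence length
# seqlen + 2, i.e. shape (batch, 16, seqlen + 2, 32).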
| 364 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="levit"
def __init__( self :List[str] , _lowercase :List[Any]=224 , _lowercase :str=3 , _lowercase :Optional[int]=3 , _lowercase :str=2 , _lowercase :List[Any]=1 , _lowercase :str=16 , _lowercase :Dict=[128, 256, 384] , _lowercase :Union[str, Any]=[4, 8, 12] , _lowercase :Tuple=[4, 4, 4] , _lowercase :Dict=[16, 16, 16] , _lowercase :Any=0 , _lowercase :Dict=[2, 2, 2] , _lowercase :Any=[2, 2, 2] , _lowercase :Tuple=0.02 , **_lowercase :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] =version.parse("1.11" )
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def __a ( self :List[Any]) -> float:
return 1E-4
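# Added note (not from the original file), hedged arithmetic for the two
# "Subsample" stages built above: with the defaults key_dim=[16, 16, 16] and
# hidden_sizes=[128, 256, 384], the derived head counts are 128 // 16 = 8 and
# 256 // 16 = 16.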
| 344 | 0 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Tuple =(DDPMParallelScheduler,)
def __a ( self :Tuple , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_lowercase)
return config
def __a ( self :Dict) -> Tuple:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :Tuple) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Dict) -> Any:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :Any) -> Union[str, Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase)
def __a ( self :Tuple) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase)
def __a ( self :Union[str, Any]) -> Optional[int]:
self.check_over_configs(thresholding=_lowercase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def __a ( self :Dict) -> int:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Tuple) -> List[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Union[str, Any]) -> Optional[int]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00_979)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def __a ( self :Optional[Any]) -> Dict:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
UpperCAmelCase_ = self.dummy_sample_deter + 0.1
UpperCAmelCase_ = self.dummy_sample_deter - 0.1
UpperCAmelCase_ = samplea.shape[0]
UpperCAmelCase_ = torch.stack([samplea, samplea, samplea] , dim=0)
UpperCAmelCase_ = torch.arange(_lowercase)[0:3, None].repeat(1 , _lowercase)
UpperCAmelCase_ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
UpperCAmelCase_ = scheduler.batch_step_no_noise(_lowercase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 1153.1833) < 1E-2
assert abs(result_mean.item() - 0.5_005) < 1E-3
def __a ( self :Optional[int]) -> Dict:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
UpperCAmelCase_ = torch.manual_seed(0)
for t in reversed(range(_lowercase)):
# 1. predict noise residual
UpperCAmelCase_ = model(_lowercase , _lowercase)
# 2. predict previous mean of sample x_t-1
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase).prev_sample
UpperCAmelCase_ = pred_prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 258.9_606) < 1E-2
assert abs(result_mean.item() - 0.3_372) < 1E-3
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''')
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
UpperCAmelCase_ = torch.manual_seed(0)
for t in reversed(range(_lowercase)):
# 1. predict noise residual
UpperCAmelCase_ = model(_lowercase , _lowercase)
# 2. predict previous mean of sample x_t-1
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase).prev_sample
UpperCAmelCase_ = pred_prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 202.0_296) < 1E-2
assert abs(result_mean.item() - 0.2_631) < 1E-3
def __a ( self :Optional[int]) -> List[str]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase)
UpperCAmelCase_ = scheduler.timesteps
for i, timestep in enumerate(_lowercase):
if i == len(_lowercase) - 1:
UpperCAmelCase_ = -1
else:
UpperCAmelCase_ = timesteps[i + 1]
UpperCAmelCase_ = scheduler.previous_timestep(_lowercase)
UpperCAmelCase_ = prev_t.item()
self.assertEqual(_lowercase , _lowercase)
def __a ( self :Dict) -> Dict:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=_lowercase)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = [100, 87, 50, 1, 0]
UpperCAmelCase_ = len(_lowercase)
with self.assertRaises(_lowercase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase)
def __a ( self :Union[str, Any]) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _lowercase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=_lowercase)
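# Added sketch (not from the original test file): the parallel entry point the
# tests above cover, batch_step_no_noise, denoises several (sample, timestep)
# pairs in one call. The zeros stand-in for the model output and the
# (3, 2, 4, 4) shape are illustrative assumptions only.
if __name__ == "__main__":
    demo_scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    demo_samples = torch.randn(3, 2, 4, 4)        # three stacked samples
    demo_timesteps = torch.tensor([999, 500, 1])  # one timestep per sample
    demo_noise = torch.zeros_like(demo_samples)   # stand-in model output
    demo_prev = demo_scheduler.batch_step_no_noise(demo_noise, demo_timesteps, demo_samples)
    print(demo_prev.shape)  # torch.Size([3, 2, 4, 4])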
| 365 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=None , __UpperCAmelCase="no" , __UpperCAmelCase="29500" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase_ = True
elif "IPython" in sys.modules:
UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
                '''your training function. Restart your notebook and make sure no cell initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
UpperCAmelCase_ = 8
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*__UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
                    '''inside your training function. Restart your notebook and make sure no cell initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
            # torch.distributed will expect a few environment variables to be set here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port=__UpperCAmelCase , mixed_precision=__UpperCAmelCase ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''MULTI_GPU''' )
print(f"Launching training on {num_processes} GPUs." )
try:
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCAmelCase_ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*__UpperCAmelCase )
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=2 ) -> Optional[Any]:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be set here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , debug=__UpperCAmelCase )
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
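# Added note (not from the original file): upstream the two launchers above are
# accelerate's `notebook_launcher` and `debug_launcher` (both were renamed to
# `A` here). A typical notebook cell, with `training_function` as a hypothetical
# user function that builds its Accelerator inside its own body, would look like:
#
#   from accelerate import notebook_launcher
#
#   def training_function(lr):
#       ...  # create Accelerator() here, not at module level
#
#   notebook_launcher(training_function, args=(3e-4,), num_processes=2)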
| 344 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class a_ ( _snake_case ):
UpperCamelCase__ : int ="vivit"
def __init__( self :Any , _lowercase :int=224 , _lowercase :Optional[Any]=32 , _lowercase :str=[2, 16, 16] , _lowercase :Optional[Any]=3 , _lowercase :Dict=768 , _lowercase :Any=12 , _lowercase :Union[str, Any]=12 , _lowercase :List[str]=3072 , _lowercase :Dict="gelu_fast" , _lowercase :str=0.0 , _lowercase :Any=0.0 , _lowercase :int=0.02 , _lowercase :Tuple=1E-0_6 , _lowercase :Optional[Any]=True , **_lowercase :Optional[Any] , ) -> Union[str, Any]:
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_frames
UpperCAmelCase_ = tubelet_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
super().__init__(**_lowercase)
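# Added note (not from the original file), hedged arithmetic: with the defaults
# above (image_size=224, num_frames=32, tubelet_size=[2, 16, 16]) a clip is
# carved into (32 // 2) * (224 // 16) * (224 // 16) = 16 * 14 * 14 = 3136
# tubelet tokens.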
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
        # program, and it breaks other tests that run from the same pytest worker; therefore, until this is
        # sorted out, it must be run only in an external program - that is, distributed=True in this
        # test and only under one or more gpus - if we want cpu we will need to make a special test
        #
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via the
        # 2nd main() call, it botches the future eval.
        #
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
        # as each sub-test is slow-ish, this is split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase :str) -> Tuple[int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are in `nn.Embedding`, which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # to 2 bytes each, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
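        # (The 8 bytes come from Adam's two fp32 moment tensors, 2 * 4 bytes per param;
        # 8-bit bnb keeps the same two moments quantized to roughly 1 byte each.)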
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between GPUs, let's check
        # that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            loss_orig , loss_bnb , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
    def run_trainer( self , max_len :int , model_name :str , num_train_epochs :int , learning_rate :float = 3E-3 , optim :str = "adafactor" , distributed :bool = False , extra_args_str :str = None , eval_steps :int = 0 , predict_with_generate :bool = True , do_train :bool = True , do_eval :bool = True , do_predict :bool = True , n_gpus_to_use :int = None , ) -> str:
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
        args_eval = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps)}\n ".split()
        args_predict = '''
        --do_predict
        '''.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
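            # torch.distributed.run spawns one worker per GPU; a unique master port keeps
            # concurrent test runs on the same host from colliding at rendezvous.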
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env())
else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs):
main()
return output_dir
| 344 | 0 |
import os
def solution(filename: str = "input.txt") -> int:
    """
    Return the minimal path sum through the matrix in ``filename``, moving from
    any cell in the left column to any cell in the right column while only
    stepping right, up, or down (Project Euler problem 82).
    """
    # resolve the data file relative to this script
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)) , filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(''',''')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols):
        # settle each cell from its left neighbour first (the "move right" step) ...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ... then relax downward moves (entering from the cell above) ...
        for i in range(1 , rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j])
        # ... and finally upward moves (entering from the cell below)
        for i in range(rows - 2 , -1 , -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
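# Example from the Project Euler 82 statement: in the 5x5 matrix beginning
#   131 673 234 103  18
#   201  96 342 965 150
# the minimal three-way path sum is 994, taking
# 201 -> 96 -> 342 -> 234 -> 103 -> 18 (one upward move in the third column).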
if __name__ == "__main__":
print(f"{solution() = }")
| 367 |
import functools
def min_distance_up_bottom(word_a: str , word_b: str) -> int:
    """
    Compute the Levenshtein edit distance between two words using memoized
    top-down dynamic programming.

    >>> min_distance_up_bottom("intention" , "execution")
    5
    >>> min_distance_up_bottom("zoo" , "zoo")
    0
    """
    len_word_a = len(word_a)
    len_word_b = len(word_b)

    @functools.cache
    def min_distance(index_a: int , index_b: int) -> int:
        # if the first word is exhausted - insert the rest of the second word
        if index_a >= len_word_a:
            return len_word_b - index_b
        # if the second word is exhausted - delete the rest of the first word
        if index_b >= len_word_b:
            return len_word_a - index_a
        diff = int(word_a[index_a] != word_b[index_b])  # 0 when the current letters match
        return min(
            1 + min_distance(index_a + 1 , index_b) ,  # delete from word_a
            1 + min_distance(index_a , index_b + 1) ,  # insert into word_a
            diff + min_distance(index_a + 1 , index_b + 1) ,  # keep or replace
        )

    return min_distance(0 , 0)
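# Memoization bounds the work by O(len(word_a) * len(word_b)) distinct
# (index_a, index_b) states, each O(1), versus the exponential naive recursion.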
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self) -> None:
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def __a ( self :str) -> str:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
        self.assertEqual(len(vocab_keys) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
def __a ( self :Optional[Any]) -> List[str]:
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
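        # Round-trip note: "9" and "é" are not in the 1000-piece test vocab, so their
        # ids fall back to 0 (<unk>) and decode as the "<unk>" token above.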
@cached_property
    def big_tokenizer( self) -> BigBirdTokenizer:
        return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@slow
def __a ( self :int) -> Any:
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ''' '''.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' , return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=False)
        config = BigBirdConfig(attention_type='''original_full''')
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def __a ( self :Optional[int]) -> Any:
        tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
        decoded_text = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
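        # The space before [MASK] disappears because BigBird's mask token is an
        # AddedToken with lstrip=True, so it absorbs the preceding whitespace.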
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class a_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__( self , vocab_file :str , do_lower_case :bool=False , remove_space :bool=True , keep_accents :bool=False , bos_token :str="<s>" , eos_token :str="</s>" , unk_token :str="<unk>" , sep_token :str="<sep>" , pad_token :str="<pad>" , cls_token :str="<cls>" , mask_token :str="<mask>" , additional_special_tokens :List[str]=["<eop>", "<eod>"] , sp_model_kwargs :Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size( self) -> int:
        return len(self.sp_model)
    def get_vocab( self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self) -> Dict[str, Any]:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d :Dict[str, Any]) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text( self , inputs :str) -> str:
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text :str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text , out_type=str)
        new_pieces = []
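        # SentencePiece may split a number differently when a comma trails it, so a
        # piece like "9," is re-encoded without the comma and the comma re-appended,
        # keeping digit tokenization consistent (original XLNet behavior).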
for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id( self , token :str) -> int:
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self , index :int) -> str:
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self , tokens :List[str]) -> str:
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE , ''' ''').strip()
        return out_string
    def _decode( self , token_ids :List[int] , skip_special_tokens :bool = False , clean_up_tokenization_spaces :bool = None , spaces_between_special_tokens :bool = True , **kwargs , ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('''use_source_tokenizer''' , False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
else:
                current_sub_text.append(token)
if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
        text = ''''''.join(sub_texts)
        clean_up_tokenization_spaces = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
    def build_inputs_with_special_tokens( self , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None) -> List[int]:
        # An XLNet sequence has the following format:
        #   single sequence:    X <sep> <cls>
        #   pair of sequences:  A <sep> B <sep> <cls>
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None , already_has_special_tokens :bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory :str , filename_prefix :Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 344 | 0 |