Dataset schema (one row = five fields, repeated below in order):

- code: string, lengths 82 to 54.1k
- code_codestyle: int64, range 0 to 699
- style_context: string, lengths 111 to 35.6k
- style_context_codestyle: int64, range 0 to 699
- label: int64, range 0 to 1
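The rows can be loaded and inspected programmatically. A minimal sketch, assuming this preview comes from a Hugging Face `datasets` repository; the repository id `org/code-style-pairs` below is a placeholder, not the real dataset name:

```python
from datasets import load_dataset

# "org/code-style-pairs" is a placeholder id -- substitute the actual dataset repository.
ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
# code_codestyle / style_context_codestyle range over 0-699 per the schema; label is 0 or 1.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample
```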
code:

```python
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> bool:
    """simple docstring"""
    a_ = len(_UpperCAmelCase ) + 1
    a_ = len(_UpperCAmelCase ) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    a_ = [[0 for i in range(_UpperCAmelCase )] for j in range(_UpperCAmelCase )]

    # since string of zero length match pattern of zero length
    a_ = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , _UpperCAmelCase ):
        a_ = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , _UpperCAmelCase ):
        a_ = dp[0][j - 2] if pattern[j - 1] == '*' else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , _UpperCAmelCase ):
        for j in range(1 , _UpperCAmelCase ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                a_ = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    a_ = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    a_ = dp[i - 1][j]
                else:
                    a_ = 0
            else:
                a_ = 0

    return bool(dp[-1][-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    __lowerCAmelCase ="aab"
    __lowerCAmelCase ="c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f'''{input_string} matches the given pattern {pattern}''')
    else:
        print(f'''{input_string} does not match with the given pattern {pattern}''')
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class _snake_case :
    """simple docstring"""

    def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
        a_ = parent
        a_ = out_indices if out_indices is not None else [4]
        a_ = stage_names
        a_ = out_features
        a_ = backbone
        a_ = batch_size
        a_ = image_size
        a_ = num_channels
        a_ = use_pretrained_backbone
        a_ = is_training

    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        a_ = self.get_config()
        return config, pixel_values

    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
        a_ = TimmBackbone(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        with torch.no_grad():
            a_ = model(UpperCAmelCase__ )
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        a_ = self.prepare_config_and_inputs()
        a_ , a_ = config_and_inputs
        a_ = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
    """simple docstring"""

    _UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
    _UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = TimmBackboneModelTester(self )
        a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        a_ = 'resnet18'
        a_ = 'microsoft/resnet-18'
        a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
        a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )

        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )

        a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
        a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )

        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )

    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        pass

    @unittest.skip('Safetensors is not supported by timm.' )
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        pass

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            a_ = model_class(UpperCAmelCase__ )
            a_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a_ = [*signature.parameters.keys()]

            a_ = ['pixel_values']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
        a_ = True
        a_ = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        a_ = self.all_model_classes[0]
        a_ = model_class(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )

        a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
        a_ = model(**UpperCAmelCase__ )

        a_ = outputs[0][-1]

        # Encoder-/Decoder-only models
        a_ = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            a_ = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )

        self.assertIsNotNone(hidden_states.grad )

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            a_ = model_class(UpperCAmelCase__ )
            model.to(UpperCAmelCase__ )
            model.eval()
            a_ = model(**UpperCAmelCase__ )

            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )

            # Check output of last stage is taken if out_features=None, out_indices=None
            a_ = copy.deepcopy(UpperCAmelCase__ )
            a_ = None
            a_ = model_class(UpperCAmelCase__ )
            model.to(UpperCAmelCase__ )
            model.eval()
            a_ = model(**UpperCAmelCase__ )

            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )

            # Check backbone can be initialized with fresh weights
            a_ = copy.deepcopy(UpperCAmelCase__ )
            a_ = False
            a_ = model_class(UpperCAmelCase__ )
            model.to(UpperCAmelCase__ )
            model.eval()
            a_ = model(**UpperCAmelCase__ )
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


__lowerCAmelCase ={
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class _snake_case ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
        a_ = size if size is not None else {'height': 18, 'width': 18}
        a_ = parent
        a_ = batch_size
        a_ = num_channels
        a_ = image_size
        a_ = min_resolution
        a_ = max_resolution
        a_ = do_resize
        a_ = size
        a_ = apply_ocr

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
    """simple docstring"""

    _UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        a_ = LayoutLMvaImageProcessingTester(self )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        return self.image_processor_tester.prepare_image_processor_dict()

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        a_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        a_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )

        a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        pass

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        # Initialize image_processing
        a_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )

        # Test not batched input
        a_ = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )

        self.assertIsInstance(encoding.words , UpperCAmelCase__ )
        self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )

        # Test batched
        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )

    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        # Initialize image_processing
        a_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )

        # Test not batched input
        a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )

        # Test batched
        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        # Initialize image_processing
        a_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )

        # Test not batched input
        a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )

        # Test batched
        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )

    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        # with apply_OCR = True
        a_ = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )

        a_ = Image.open(ds[0]['file'] ).convert('RGB' )

        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words , UpperCAmelCase__ )
        self.assertListEqual(encoding.boxes , UpperCAmelCase__ )

        # with apply_OCR = False
        a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )

        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase =logging.get_logger(__name__)

__lowerCAmelCase ={
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = "canine"

    def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=1_6384 , UpperCAmelCase__=16 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__=0xe000 , UpperCAmelCase__=0xe001 , UpperCAmelCase__=4 , UpperCAmelCase__=4 , UpperCAmelCase__=8 , UpperCAmelCase__=1_6384 , UpperCAmelCase__=128 , **UpperCAmelCase__ , ) -> Any:
        super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )

        a_ = max_position_embeddings
        a_ = hidden_size
        a_ = num_hidden_layers
        a_ = num_attention_heads
        a_ = intermediate_size
        a_ = hidden_act
        a_ = hidden_dropout_prob
        a_ = attention_probs_dropout_prob
        a_ = initializer_range
        a_ = type_vocab_size
        a_ = layer_norm_eps

        # Character config:
        a_ = downsampling_rate
        a_ = upsampling_kernel_size
        a_ = num_hash_functions
        a_ = num_hash_buckets
        a_ = local_transformer_stride
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
import math


def a ( _UpperCAmelCase ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def a ( _UpperCAmelCase = 1_0_0_0_1 ) -> int:
    """simple docstring"""
    try:
        a_ = int(_UpperCAmelCase )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    a_ = []
    a_ = 2
    while len(_UpperCAmelCase ) < nth:
        if is_prime(_UpperCAmelCase ):
            primes.append(_UpperCAmelCase )
            num += 1
        else:
            num += 1
    return primes[len(_UpperCAmelCase ) - 1]


if __name__ == "__main__":
    print(f'''{solution() = }''')
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable


def a ( _UpperCAmelCase ) -> Callable:
    """simple docstring"""

    @wraps(_UpperCAmelCase )
    def _inner_fn(*_UpperCAmelCase , **_UpperCAmelCase ):
        warnings.warn(
            (F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , _UpperCAmelCase , )
        return fn(*_UpperCAmelCase , **_UpperCAmelCase )

    return _inner_fn
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class _snake_case ( unittest.TestCase ):
    """simple docstring"""

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        a_ = 10

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = [1, 2, 3, 4]
        a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        a_ , a_ = process_story(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , [] )

    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        a_ = ''
        a_ , a_ = process_story(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , [] )
        self.assertEqual(UpperCAmelCase__ , [] )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        a_ , a_ = process_story(UpperCAmelCase__ )

        a_ = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

        a_ = ['It was the best of times.']
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = torch.tensor([1, 2, 3, 4] )
        a_ = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )

    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        a_ = 101
        a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )

        a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
        np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
from __future__ import annotations

from statistics import mean


def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> list[int]:
    """simple docstring"""
    a_ = [0] * no_of_processes
    a_ = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.

    for i in range(_UpperCAmelCase ):
        a_ = burst_time[i]
    a_ = []
    a_ = 0
    a_ = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        a_ = []
        a_ = -1
        for i in range(_UpperCAmelCase ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(_UpperCAmelCase )

        if len(_UpperCAmelCase ) > 0:
            a_ = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    a_ = i
            total_time += burst_time[target_process]
            completed += 1
            a_ = 0
            a_ = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> list[int]:
    """simple docstring"""
    a_ = [0] * no_of_processes
    for i in range(_UpperCAmelCase ):
        a_ = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    __lowerCAmelCase =4
    __lowerCAmelCase =[2, 5, 3, 7]
    __lowerCAmelCase =[0, 0, 0, 0]
    __lowerCAmelCase =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    __lowerCAmelCase =calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
            f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
        )
    print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
    print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class _snake_case ( unittest.TestCase , snake_case ):
    """simple docstring"""

    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        a_ = load_tool('text-question-answering' )
        self.tool.setup()
        a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
import os
from math import logaa


def a ( _UpperCAmelCase = "base_exp.txt" ) -> int:
    """simple docstring"""
    a_ = 0
    a_ = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
        a_ , a_ = list(map(_UpperCAmelCase , line.split(',' ) ) )
        if x * logaa(_UpperCAmelCase ) > largest:
            a_ = x * logaa(_UpperCAmelCase )
            a_ = i + 1
    return result


if __name__ == "__main__":
    print(solution())
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__lowerCAmelCase =logging.get_logger(__name__)

__lowerCAmelCase ={
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = "camembert"

    def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=1 , UpperCAmelCase__=0 , UpperCAmelCase__=2 , UpperCAmelCase__="absolute" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> Optional[int]:
        super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )

        a_ = vocab_size
        a_ = hidden_size
        a_ = num_hidden_layers
        a_ = num_attention_heads
        a_ = hidden_act
        a_ = intermediate_size
        a_ = hidden_dropout_prob
        a_ = attention_probs_dropout_prob
        a_ = max_position_embeddings
        a_ = type_vocab_size
        a_ = initializer_range
        a_ = layer_norm_eps
        a_ = position_embedding_type
        a_ = use_cache
        a_ = classifier_dropout


class _snake_case ( snake_case ):
    """simple docstring"""

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            a_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            a_ = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__lowerCAmelCase =logging.get_logger(__name__)

__lowerCAmelCase ={
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = "vit"

    def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
        super().__init__(**UpperCAmelCase__ )

        a_ = hidden_size
        a_ = num_hidden_layers
        a_ = num_attention_heads
        a_ = intermediate_size
        a_ = hidden_act
        a_ = hidden_dropout_prob
        a_ = attention_probs_dropout_prob
        a_ = initializer_range
        a_ = layer_norm_eps
        a_ = image_size
        a_ = patch_size
        a_ = num_channels
        a_ = qkv_bias
        a_ = encoder_stride


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = version.parse("1.11" )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> float:
        return 1e-4
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowerCAmelCase ={
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
def a ( _UpperCAmelCase = 5_0 ) -> int:
    """simple docstring"""
    a_ = [1] * (length + 1)

    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


__lowerCAmelCase ={
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure)
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
    """simple docstring"""
    return int(input_a == input_a == 0 )


def a ( ) -> None:
    """simple docstring"""
    print('Truth Table of NOR Gate:' )
    print('| Input 1 | Input 2 | Output |' )
    print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
    print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
    print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
    print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


__lowerCAmelCase ="hf-internal-testing/tiny-random-bert"
__lowerCAmelCase =os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__lowerCAmelCase ="9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class _snake_case ( unittest.TestCase ):
    """simple docstring"""

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        a_ = cached_file(UpperCAmelCase__ , UpperCAmelCase__ )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(UpperCAmelCase__ ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
        with open(os.path.join(UpperCAmelCase__ , 'refs' , 'main' ) ) as f:
            a_ = f.read()
        self.assertEqual(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , 'snapshots' , UpperCAmelCase__ , UpperCAmelCase__ ) )
        self.assertTrue(os.path.isfile(UpperCAmelCase__ ) )

        # File is cached at the same place the second time.
        a_ = cached_file(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

        # Using a specific revision to test the full commit hash.
        a_ = cached_file(UpperCAmelCase__ , UpperCAmelCase__ , revision='9b8c223' )
        self.assertEqual(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , 'snapshots' , UpperCAmelCase__ , UpperCAmelCase__ ) )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        with self.assertRaisesRegex(UpperCAmelCase__ , 'is not a valid model identifier' ):
            a_ = cached_file('tiny-random-bert' , UpperCAmelCase__ )

        with self.assertRaisesRegex(UpperCAmelCase__ , 'is not a valid git identifier' ):
            a_ = cached_file(UpperCAmelCase__ , UpperCAmelCase__ , revision='aaaa' )

        with self.assertRaisesRegex(UpperCAmelCase__ , 'does not appear to have a file named' ):
            a_ = cached_file(UpperCAmelCase__ , 'conf' )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        with self.assertRaisesRegex(UpperCAmelCase__ , 'does not appear to have a file named' ):
            a_ = cached_file(UpperCAmelCase__ , 'conf' )

        with open(os.path.join(UpperCAmelCase__ , 'refs' , 'main' ) ) as f:
            a_ = f.read()
        self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '.no_exist' , UpperCAmelCase__ , 'conf' ) ) )

        a_ = cached_file(UpperCAmelCase__ , 'conf' , _raise_exceptions_for_missing_entries=UpperCAmelCase__ )
        self.assertIsNone(UpperCAmelCase__ )

        a_ = cached_file(UpperCAmelCase__ , 'conf' , local_files_only=UpperCAmelCase__ , _raise_exceptions_for_missing_entries=UpperCAmelCase__ )
        self.assertIsNone(UpperCAmelCase__ )

        a_ = mock.Mock()
        a_ = 500
        a_ = {}
        a_ = HTTPError
        a_ = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=UpperCAmelCase__ ) as mock_head:
            a_ = cached_file(UpperCAmelCase__ , 'conf' , _raise_exceptions_for_connection_errors=UpperCAmelCase__ )
            self.assertIsNone(UpperCAmelCase__ )
            # This check we did call the fake head request
            mock_head.assert_called()

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , UpperCAmelCase__ ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , UpperCAmelCase__ ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , UpperCAmelCase__ ) )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(UpperCAmelCase__ , 'is not a valid model identifier' ):
            get_file_from_repo('bert-base-case' , UpperCAmelCase__ )

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(UpperCAmelCase__ , 'is not a valid git identifier' ):
            get_file_from_repo('bert-base-cased' , UpperCAmelCase__ , revision='ahaha' )

        a_ = get_file_from_repo('bert-base-cased' , UpperCAmelCase__ )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        a_ = json.loads(open(UpperCAmelCase__ , 'r' ).read() )
        self.assertEqual(config['hidden_size'] , 768 )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        with tempfile.TemporaryDirectory() as tmp_dir:
            a_ = Path(UpperCAmelCase__ ) / 'a.txt'
            filename.touch()
            self.assertEqual(get_file_from_repo(UpperCAmelCase__ , 'a.txt' ) , str(UpperCAmelCase__ ) )
            self.assertIsNone(get_file_from_repo(UpperCAmelCase__ , 'b.txt' ) )
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def a ( _UpperCAmelCase ) -> int:
    """simple docstring"""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )


class _snake_case ( snake_case ):
    """simple docstring"""

    @staticmethod
    def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str:
        a_ = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' )
        download_parser.set_defaults(func=UpperCAmelCase__ )

    def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
        a_ = model
        a_ = cache
        a_ = force
        a_ = trust_remote_code

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
```

style_context_codestyle: 697
label: 1
code:

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


__lowerCAmelCase ={
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```

code_codestyle: 697
style_context:

```python
'''simple docstring'''
def a ( _UpperCAmelCase ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    a_ = 0
    a_ = len(_UpperCAmelCase ) - 1
    a_ = 0

    while index >= 0:
        a_ = (ord(column_title[index] ) - 6_4) * pow(2_6 , _UpperCAmelCase )
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
```

style_context_codestyle: 697
label: 1
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = ["vqvae"] def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) -> Dict: super().__init__() self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , mel=UpperCAmelCase__ , vqvae=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> int: return 50 if isinstance(self.scheduler , UpperCAmelCase__ ) else 1000 @torch.no_grad() def __call__( self , UpperCAmelCase__ = 1 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = 0 , UpperCAmelCase__ = 0 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = 0 , UpperCAmelCase__ = 0 , UpperCAmelCase__ = None , UpperCAmelCase__ = 0 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: a_ = steps or self.get_default_steps() self.scheduler.set_timesteps(UpperCAmelCase__ ) a_ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: a_ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: a_ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=UpperCAmelCase__ , device=self.device , ) a_ = noise a_ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = self.mel.audio_slice_to_image(UpperCAmelCase__ ) a_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) a_ = (input_image / 255) * 2 - 1 a_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: a_ = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase__ , 0 ) ).latent_dist.sample( generator=UpperCAmelCase__ )[0] a_ = self.vqvae.config.scaling_factor * input_images if start_step > 0: a_ = self.scheduler.add_noise(UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler.timesteps[start_step - 1] ) a_ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) a_ = int(mask_start_secs * pixels_per_second ) a_ = int(mask_end_secs * pixels_per_second ) a_ = self.scheduler.add_noise(UpperCAmelCase__ , UpperCAmelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , UpperCAmelCase__ ): a_ = self.unet(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )['sample'] else: a_ = self.unet(UpperCAmelCase__ , UpperCAmelCase__ )['sample'] if isinstance(self.scheduler , UpperCAmelCase__ ): a_ = self.scheduler.step( model_output=UpperCAmelCase__ , timestep=UpperCAmelCase__ , sample=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , )['prev_sample'] else: a_ = self.scheduler.step( model_output=UpperCAmelCase__ , timestep=UpperCAmelCase__ , sample=UpperCAmelCase__ , generator=UpperCAmelCase__ , 
)['prev_sample'] if mask is not None: if mask_start > 0: a_ = mask[:, step, :, :mask_start] if mask_end > 0: a_ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance a_ = 1 / self.vqvae.config.scaling_factor * images a_ = self.vqvae.decode(UpperCAmelCase__ )['sample'] a_ = (images / 2 + 0.5).clamp(0 , 1 ) a_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() a_ = (images * 255).round().astype('uint8' ) a_ = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(UpperCAmelCase__ , mode='RGB' ).convert('L' ) for _ in images) ) a_ = [self.mel.image_to_audio(UpperCAmelCase__ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCAmelCase__ ) ) @torch.no_grad() def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = 50 ) -> np.ndarray: assert isinstance(self.scheduler , UpperCAmelCase__ ) self.scheduler.set_timesteps(UpperCAmelCase__ ) a_ = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) a_ = (sample / 255) * 2 - 1 a_ = torch.Tensor(UpperCAmelCase__ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): a_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps a_ = self.scheduler.alphas_cumprod[t] a_ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) a_ = 1 - alpha_prod_t a_ = self.unet(UpperCAmelCase__ , UpperCAmelCase__ )['sample'] a_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output a_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) a_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> torch.Tensor: a_ = acos(torch.dot(torch.flatten(UpperCAmelCase__ ) , torch.flatten(UpperCAmelCase__ ) ) / torch.norm(UpperCAmelCase__ ) / torch.norm(UpperCAmelCase__ ) ) return sin((1 - alpha) * theta ) * xa / sin(UpperCAmelCase__ ) + sin(alpha * theta ) * xa / sin(UpperCAmelCase__ )
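The pipeline above mirrors diffusers' AudioDiffusionPipeline (a UNet denoiser plus a Mel helper, with an optional VQ-VAE latent stage). A minimal usage sketch against that upstream API; the checkpoint id and output field names are assumptions, not guaranteed by the obfuscated code above:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-256')  # assumed checkpoint id
generator = torch.Generator(device='cpu').manual_seed(42)
output = pipe(batch_size=1, generator=generator)
image = output.images[0]                  # mel spectrogram rendered as a PIL image
audio = output.audios[0, 0]               # reconstructed waveform as a numpy array
sample_rate = pipe.mel.get_sample_rate()  # the Mel helper registered on the pipeline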
697
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCAmelCase =logging.get_logger(__name__) class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]: super().__init__( feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , ) a_ = spectrogram_length a_ = num_channels a_ = patch_size a_ = feature_size // self.patch_size[1] a_ = n_fft a_ = sampling_rate // hop_length_to_sampling_rate a_ = sampling_rate a_ = padding_value a_ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray: a_ = spectrogram( UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , ) a_ = log_spec[:, :-1] a_ = log_spec - 2_0.0 a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) a_ = is_batched_numpy or ( isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ): a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa ) elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a_ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis a_ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , UpperCAmelCase__ ): a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask a_ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: a_ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] a_ = np.array(UpperCAmelCase__ ).astype(np.floataa ) # convert into correct format for padding a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) a_ = padded_audio_features * self.padding_value for i in range(len(UpperCAmelCase__ ) ): a_ = audio_features[i] a_ = feature # return as BatchFeature if return_attention_mask: a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: a_ = {'audio_values': padded_audio_features} a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) return encoded_inputs
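The extractor above mirrors transformers' TvltFeatureExtractor (44.1 kHz audio turned into 128-bin log-mel patches plus a patch mask). Because the argument names above are obfuscated placeholders, this hedged sketch targets the upstream keyword API instead:

import numpy as np
from transformers import TvltFeatureExtractor  # assumed upstream class this file mirrors

feature_extractor = TvltFeatureExtractor()
speech = np.zeros(44100, dtype=np.float32)  # one second of silence at 44.1 kHz
inputs = feature_extractor(speech, sampling_rate=44100, return_tensors='np')
print(inputs['audio_values'].shape)  # (batch, 1, padded_time, 128)
print(inputs['audio_mask'].shape)    # mask over valid audio patches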
697
1
'''simple docstring'''
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    # Alternate left-to-right and right-to-left traversal, level by level.
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
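A quick sanity check of the helpers above on the five-node tree from make_tree(); the expected values are derived by hand from the definitions:

tree = make_tree()
assert preorder(tree) == [1, 2, 4, 5, 3]
assert inorder(tree) == [4, 2, 5, 1, 3]
assert postorder(tree) == [4, 5, 2, 3, 1]
assert level_order(tree) == [1, 2, 3, 4, 5]
assert zigzag(tree) == [[1], [3, 2], [4, 5]]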
697
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        union = set_a + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
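A few worked calls; the values follow directly from the definitions above (3 shared elements out of an 8-element union, and a doubled denominator when alternative_union is set):

print(jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}))  # 0.375
print(jaccard_similarity(["a", "b", "c", "d", "e"], ("c", "d", "e", "f", "h", "i")))  # 0.375
print(jaccard_similarity({"a", "b"}, {"a", "b"}, alternative_union=True))             # 0.5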
697
1
'''simple docstring'''
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
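A hedged usage sketch against the upstream BridgeTower checkpoint this processor mirrors; the checkpoint id and image URL are assumptions for illustration:

import requests
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained('BridgeTower/bridgetower-base')  # assumed checkpoint
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'                    # assumed sample image
image = Image.open(requests.get(url, stream=True).raw)
encoding = processor(image, 'two cats sleeping on a couch', return_tensors='pt')
print(sorted(encoding.keys()))  # input_ids, attention_mask, pixel_values, pixel_mask, ...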
697
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += ' '
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
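Example output; the trailing space after each entry comes from the out += ' ' step above:

print(fizz_buzz(1, 7))   # '1 2 Fizz 4 Buzz Fizz 7 '
print(fizz_buzz(1, 15))  # ends with 'FizzBuzz ' since 15 is divisible by both 3 and 5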
697
1
'''simple docstring'''
def solution(limit: int = 28123) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res


if __name__ == "__main__":
    print(solution())
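This is Project Euler problem 23: sum every positive integer that cannot be written as the sum of two abundant numbers (28123 is the known upper bound past which all integers can). A quick check:

# 12 is the smallest abundant number (1+2+3+4+6 = 16 > 12), so 24 is the
# smallest integer expressible as a sum of two abundant numbers.
print(solution())  # 4179871 for the default limit of 28123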
697
'''simple docstring'''
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
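In the datasets library this reader backs Dataset.from_generator; a minimal sketch of that public entry point, assuming datasets is installed:

from datasets import Dataset

def gen():
    # Any callable yielding dicts works; columns are inferred from the keys.
    for i in range(3):
        yield {'id': i, 'text': f'example {i}'}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}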
697
1
'''simple docstring''' import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __lowerCAmelCase =logging.getLogger(__name__) class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = "token-classification" def __init__( self , UpperCAmelCase__ ) -> Optional[Any]: if type(UpperCAmelCase__ ) == dict: a_ = Namespace(**UpperCAmelCase__ ) a_ = import_module('tasks' ) try: a_ = getattr(UpperCAmelCase__ , hparams.task_type ) a_ = token_classification_task_clazz() except AttributeError: raise ValueError( F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) a_ = self.token_classification_task.get_labels(hparams.labels ) a_ = CrossEntropyLoss().ignore_index super().__init__(UpperCAmelCase__ , len(self.labels ) , self.mode ) def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> Optional[Any]: return self.model(**UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]: a_ = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type != "distilbert": a_ = ( batch[2] if self.config.model_type in ['bert', 'xlnet'] else None ) # XLM and RoBERTa don"t use token_type_ids a_ = self(**UpperCAmelCase__ ) a_ = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self.hparams for mode in ["train", "dev", "test"]: a_ = self._feature_file(UpperCAmelCase__ ) if os.path.exists(UpperCAmelCase__ ) and not args.overwrite_cache: logger.info('Loading features from cached file %s' , UpperCAmelCase__ ) a_ = torch.load(UpperCAmelCase__ ) else: logger.info('Creating features from dataset file at %s' , args.data_dir ) a_ = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCAmelCase__ ) a_ = self.token_classification_task.convert_examples_to_features( UpperCAmelCase__ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCAmelCase__ , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info('Saving features into cached file %s' , UpperCAmelCase__ ) torch.save(UpperCAmelCase__ , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False ) -> DataLoader: a_ = self._feature_file(UpperCAmelCase__ ) logger.info('Loading features from cached file %s' , UpperCAmelCase__ ) a_ = torch.load(UpperCAmelCase__ ) a_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) a_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: a_ = torch.tensor([f.token_type_ids for f in 
features] , dtype=torch.long ) else: a_ = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) a_ = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , batch_size=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]: """Compute validation""" "" a_ = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type != "distilbert": a_ = ( batch[2] if self.config.model_type in ['bert', 'xlnet'] else None ) # XLM and RoBERTa don"t use token_type_ids a_ = self(**UpperCAmelCase__ ) a_ , a_ = outputs[:2] a_ = logits.detach().cpu().numpy() a_ = inputs['labels'].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]: a_ = torch.stack([x['val_loss'] for x in outputs] ).mean() a_ = np.concatenate([x['pred'] for x in outputs] , axis=0 ) a_ = np.argmax(UpperCAmelCase__ , axis=2 ) a_ = np.concatenate([x['target'] for x in outputs] , axis=0 ) a_ = dict(enumerate(self.labels ) ) a_ = [[] for _ in range(out_label_ids.shape[0] )] a_ = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) a_ = { 'val_loss': val_loss_mean, 'accuracy_score': accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ), 'precision': precision_score(UpperCAmelCase__ , UpperCAmelCase__ ), 'recall': recall_score(UpperCAmelCase__ , UpperCAmelCase__ ), 'f1': fa_score(UpperCAmelCase__ , UpperCAmelCase__ ), } a_ = dict(results.items() ) a_ = results return ret, preds_list, out_label_list def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Dict: # when stable a_ , a_ , a_ = self._eval_end(UpperCAmelCase__ ) a_ = ret['log'] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[Any]: # updating to test_epoch_end instead of deprecated test_end a_ , a_ , a_ = self._eval_end(UpperCAmelCase__ ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 a_ = ret['log'] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , UpperCAmelCase__ ) -> int: # Add NER specific options BaseTransformer.add_model_specific_args(UpperCAmelCase__ , UpperCAmelCase__ ) parser.add_argument( '--task_type' , default='NER' , type=UpperCAmelCase__ , help='Task type to fine tune in training (e.g. NER, POS, etc)' ) parser.add_argument( '--max_seq_length' , default=128 , type=UpperCAmelCase__ , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--labels' , default='' , type=UpperCAmelCase__ , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' 
, ) parser.add_argument( '--gpus' , default=0 , type=UpperCAmelCase__ , help='The number of GPUs allocated for this, it is by default 0 meaning none' , ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) return parser if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __lowerCAmelCase =NERTransformer.add_model_specific_args(parser, os.getcwd()) __lowerCAmelCase =parser.parse_args() __lowerCAmelCase =NERTransformer(args) __lowerCAmelCase =generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)) __lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
697
'''simple docstring'''
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
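Each iteration replaces every segment with four shorter ones, so a vector list with n segments grows to 4n segments (4n + 1 points); a quick check on the initial triangle:

assert len(INITIAL_VECTORS) == 4                 # 3 segments
assert len(iterate(INITIAL_VECTORS, 1)) == 13    # 12 segments after one step
assert len(iterate(INITIAL_VECTORS, 2)) == 49    # 48 segments after two steps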
697
1
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class _snake_case ( snake_case ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = tempfile.mkdtemp() a_ = 5 # Realm tok a_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'test', 'question', 'this', 'is', 'the', 'first', 'second', 'third', 'fourth', 'fifth', 'record', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] a_ = os.path.join(self.tmpdirname , 'realm_tokenizer' ) os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) a_ = os.path.join(UpperCAmelCase__ , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) a_ = os.path.join(self.tmpdirname , 'realm_block_records' ) os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> RealmTokenizer: return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: shutil.rmtree(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = RealmConfig(num_block_records=self.num_block_records ) return config def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = Dataset.from_dict( { 'id': ['0', '1'], 'question': ['foo', 'bar'], 'answers': [['Foo', 'Bar'], ['Bar']], } ) return dataset def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = np.array( [ B'This is the first record', B'This is the second record', B'This is the third record', B'This is the fourth record', B'This is the fifth record', B'This is a longer longer longer record', ] , dtype=UpperCAmelCase__ , ) return block_records def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = self.get_config() a_ = self.get_dummy_retriever() a_ = retriever.tokenizer a_ = np.array([0, 3] , dtype='long' ) a_ = tokenizer(['Test question'] ).input_ids a_ = tokenizer( ['the fourth'] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids a_ = config.reader_seq_len a_ , a_ , a_ , a_ = retriever( UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='np' ) self.assertEqual(len(UpperCAmelCase__ ) , 2 ) self.assertEqual(len(UpperCAmelCase__ ) , 2 ) self.assertEqual(len(UpperCAmelCase__ ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , 
) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = self.get_config() a_ = self.get_dummy_retriever() a_ = retriever.tokenizer a_ = np.array([0, 3, 5] , dtype='long' ) a_ = tokenizer(['Test question'] ).input_ids a_ = tokenizer( ['the fourth', 'longer longer'] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids a_ = config.reader_seq_len a_ , a_ , a_ , a_ = retriever( UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='np' ) self.assertEqual([False, True, True] , UpperCAmelCase__ ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase__ ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) ) # Test local path a_ = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) ) self.assertEqual(retriever.block_records[0] , B'This is the first record' ) # Test mocked remote path with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download: a_ = os.path.join( os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME ) a_ = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' ) self.assertEqual(retriever.block_records[0] , B'This is the first record' )
697
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def a ( _UpperCAmelCase ) -> int: """simple docstring""" if ( (cp >= 0X4_e00 and cp <= 0X9_fff) or (cp >= 0X3_400 and cp <= 0X4_dbf) # or (cp >= 0X20_000 and cp <= 0X2a_6df) # or (cp >= 0X2a_700 and cp <= 0X2b_73f) # or (cp >= 0X2b_740 and cp <= 0X2b_81f) # or (cp >= 0X2b_820 and cp <= 0X2c_eaf) # or (cp >= 0Xf_900 and cp <= 0Xf_aff) or (cp >= 0X2f_800 and cp <= 0X2f_a1f) # ): # return True return False def a ( _UpperCAmelCase ) -> Union[str, Any]: """simple docstring""" for char in word: a_ = ord(_UpperCAmelCase ) if not _is_chinese_char(_UpperCAmelCase ): return 0 return 1 def a ( _UpperCAmelCase ) -> Tuple: """simple docstring""" a_ = set() for token in tokens: a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase ) if chinese_word: word_set.add(_UpperCAmelCase ) a_ = list(_UpperCAmelCase ) return word_list def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: """simple docstring""" if not chinese_word_set: return bert_tokens a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] ) a_ = bert_tokens a_ , a_ = 0, len(_UpperCAmelCase ) while start < end: a_ = True if is_chinese(bert_word[start] ): a_ = min(end - start , _UpperCAmelCase ) for i in range(_UpperCAmelCase , 1 , -1 ): a_ = ''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): a_ = '##' + bert_word[j] a_ = start + i a_ = False break if single_word: start += 1 return bert_word def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: """simple docstring""" a_ = [] for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ): a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws a_ = [get_chinese_word(_UpperCAmelCase ) for r in res] ltp_res.extend(_UpperCAmelCase ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) a_ = [] for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ): a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 ) bert_res.extend(res['input_ids'] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) a_ = [] for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ): a_ = [] for id in input_ids: a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase ) input_tokens.append(_UpperCAmelCase ) a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase ) a_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_UpperCAmelCase ): if token[:2] == "##": a_ = token[2:] # save chinese tokens' pos if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ): ref_id.append(_UpperCAmelCase ) ref_ids.append(_UpperCAmelCase ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) return ref_ids def a ( _UpperCAmelCase ) -> Optional[Any]: """simple docstring""" with open(args.file_name , 'r' , encoding='utf-8' ) as f: a_ = f.readlines() a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' a_ = LTP(args.ltp ) # faster in GPU device a_ = BertTokenizer.from_pretrained(args.bert ) a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids] f.writelines(_UpperCAmelCase ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) __lowerCAmelCase =parser.parse_args() main(args)
697
1
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = RobertaTokenizer _UpperCamelCase = RobertaTokenizerFast _UpperCamelCase = True _UpperCamelCase = {"cls_token": "<s>"} def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a_ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] a_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) a_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] a_ = {'unk_token': '<unk>'} a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(UpperCAmelCase__ ) ) def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> int: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[Any]: a_ = 'lower newer' a_ = 'lower newer' return input_text, output_text def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) a_ = 'lower newer' a_ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] a_ = tokenizer.tokenize(UpperCAmelCase__ ) # , add_prefix_space=True) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = tokens + [tokenizer.unk_token] a_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCAmelCase__ ) , [0, 3_1414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=UpperCAmelCase__ ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ = self.tokenizer_class.from_pretrained('roberta-base' ) a_ = tokenizer.encode('sequence builders' , add_special_tokens=UpperCAmelCase__ ) a_ = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCAmelCase__ ) a_ = tokenizer.encode( 'sequence builders' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ ) a_ = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ ) a_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) a_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = self.get_tokenizer() a_ = 'Encode this sequence.' a_ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments a_ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ ) a_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ ) a_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) a_ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) a_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # Testing spaces after special tokens a_ = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ )} ) # mask token has a left space a_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) a_ = 'Encode <mask> sequence' a_ = 'Encode <mask>sequence' a_ = tokenizer.encode(UpperCAmelCase__ ) a_ = encoded.index(UpperCAmelCase__ ) a_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = tokenizer.encode(UpperCAmelCase__ ) a_ = encoded.index(UpperCAmelCase__ ) a_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass def __SCREAMING_SNAKE_CASE ( self ) -> Dict: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) a_ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) a_ = 'A, <mask> AllenNLP sentence.' 
a_ = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) a_ = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) a_ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) a_ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( UpperCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): a_ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ ) a_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) a_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , UpperCAmelCase__ ) self.assertEqual(post_processor_state['add_prefix_space'] , UpperCAmelCase__ ) self.assertEqual(post_processor_state['trim_offsets'] , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a_ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` a_ = F'''{text_of_1_token} {text_of_1_token}''' a_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ ) a_ = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase__ ) + 1, len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , ) a_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ ) a_ = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase__ ) + 1, len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , ) a_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ ) a_ = tokenizer_r(UpperCAmelCase__ , 
return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , ) a_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ ) a_ = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , ) a_ = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) a_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ ) a_ = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ) + 1, 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , ) a_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ ) a_ = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ), 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , ) a_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ ) a_ = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ), 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
697
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
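A hedged usage sketch; it assumes the yjernite/retribert-base-uncased checkpoint referenced in the maps above is reachable and that the class is importable from transformers:

from transformers import RetriBertTokenizerFast  # assumed import path

tokenizer = RetriBertTokenizerFast.from_pretrained('yjernite/retribert-base-uncased')
encoded = tokenizer('How many pretrained models are available?')
print(encoded['input_ids'][:3])       # starts with the [CLS] id
print(encoded['attention_mask'][:3])  # all ones for an unpadded sequence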
697
1
'''simple docstring'''
import pytest

DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f'{script_name}.py'
    with open(script_path, 'w') as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
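A hedged sketch of a test consuming the fixture above; it assumes the fixtures are collected via a conftest.py and that the installed datasets version still supports script-based loading:

import datasets

def test_load_dummy_dataset(dataset_loading_script_dir):
    # The fixture returns the directory containing __dummy_dataset1__.py.
    builder = datasets.load_dataset_builder(dataset_loading_script_dir)
    assert builder.name == '__dummy_dataset1__'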
697
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
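A minimal sketch of the usual config workflow; this is standard PretrainedConfig behavior, nothing specific to this file:

config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
print(config.model_type)    # 'lilt'
print(config.hidden_size)   # 768 by default
config.save_pretrained('./lilt-config')  # writes config.json to the given directory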
697
1
'''simple docstring'''
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be (lower < higher)')
    if not lower < to_guess < higher:
        raise ValueError('guess value must be within the range of lower and higher value')

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...')
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f'guess the number : {last_numbers[-1]}')
    print(f'details : {last_numbers!s}')


def main() -> None:
    lower = int(input('Enter lower value : ').strip())
    higher = int(input('Enter high value : ').strip())
    guess = int(input('Enter value to guess : ').strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
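A non-interactive trace of guess_the_number; the sequence of guesses follows by hand from get_avg's integer midpoint:

guess_the_number(1, 100, 42)
# started...
# guess the number : 42
# details : [50, 25, 37, 43, 40, 41, 42]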
697
'''simple docstring'''
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
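A small solvable grid (0 = open cell, 1 = wall); the solver prints the visited-path matrix and returns True:

maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 0, 0],
]
assert solve_maze(maze)  # a path exists from (0, 0) to (4, 4)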
697
1
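The first sample in this row narrows a guessing range by repeatedly probing the midpoint, which is binary search over integers; the second is classic rat-in-a-maze backtracking. A self-contained sketch of the bisection loop (the helper name and the non-interactive harness are illustrative):

def bisect_guess(lower: int, higher: int, target: int) -> int:
    """Count the midpoint probes needed to hit `target`, given lower < target < higher."""
    probes = 0
    while True:
        probes += 1
        mid = (lower + higher) // 2
        if mid < target:
            lower = mid    # oracle says "low": keep the upper half
        elif mid > target:
            higher = mid   # oracle says "high": keep the lower half
        else:
            return probes

print(bisect_guess(0, 1000, 355))  # 8, on the order of log2(1000)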
'''simple docstring''' from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def a ( _UpperCAmelCase ) -> Optional[int]: """simple docstring""" def is_in_circle(_UpperCAmelCase , _UpperCAmelCase ) -> bool: a_ = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle a_ = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(_UpperCAmelCase ) ) # The ratio of the area for circle to square is pi/4. a_ = proportion * 4 print(F'''The estimated value of pi is {pi_estimate}''' ) print(F'''The numpy value of pi is {pi}''' ) print(F'''The total error is {abs(pi - pi_estimate )}''' ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1.0 , ) -> float: """simple docstring""" return mean( function_to_integrate(uniform(_UpperCAmelCase , _UpperCAmelCase ) ) for _ in range(_UpperCAmelCase ) ) * (max_value - min_value) def a ( _UpperCAmelCase , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1.0 ) -> None: """simple docstring""" def identity_function(_UpperCAmelCase ) -> float: return x a_ = area_under_curve_estimator( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) a_ = (max_value * max_value - min_value * min_value) / 2 print('******************' ) print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {expected_value}''' ) print(F'''Total error is {abs(estimated_value - expected_value )}''' ) print('******************' ) def a ( _UpperCAmelCase ) -> None: """simple docstring""" def function_to_integrate(_UpperCAmelCase ) -> float: return sqrt(4.0 - x * x ) a_ = area_under_curve_estimator( _UpperCAmelCase , _UpperCAmelCase , 0.0 , 2.0 ) print('******************' ) print('Estimating pi using area_under_curve_estimator' ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {pi}''' ) print(F'''Total error is {abs(estimated_value - pi )}''' ) print('******************' ) if __name__ == "__main__": import doctest doctest.testmod()
697
'''simple docstring''' __lowerCAmelCase ={ "meter": "m", "kilometer": "km", "megametre": "Mm", "gigametre": "Gm", "terametre": "Tm", "petametre": "Pm", "exametre": "Em", "zettametre": "Zm", "yottametre": "Ym", } # Exponent of the factor(meter) __lowerCAmelCase ={ "m": 0, "km": 3, "Mm": 6, "Gm": 9, "Tm": 12, "Pm": 15, "Em": 18, "Zm": 21, "Ym": 24, } def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float: """simple docstring""" a_ = from_type.lower().strip('s' ) a_ = to_type.lower().strip('s' ) a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase ) a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase ) if from_sanitized not in METRIC_CONVERSION: a_ = ( F'''Invalid \'from_type\' value: {from_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}''' ) raise ValueError(_UpperCAmelCase ) if to_sanitized not in METRIC_CONVERSION: a_ = ( F'''Invalid \'to_type\' value: {to_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}''' ) raise ValueError(_UpperCAmelCase ) a_ = METRIC_CONVERSION[from_sanitized] a_ = METRIC_CONVERSION[to_sanitized] a_ = 1 if from_exponent > to_exponent: a_ = from_exponent - to_exponent else: a_ = -(to_exponent - from_exponent) return value * pow(1_0 , _UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
697
1
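The first sample in this row estimates pi and definite integrals by averaging uniform samples. The core Monte Carlo step fits in a few lines of standard-library Python; the sample count and seed below are illustrative:

import random

random.seed(0)  # only to make the demo reproducible

def estimate_pi(samples: int = 100_000) -> float:
    # For uniform points in the unit square, P(inside the quarter circle) = pi / 4.
    inside = sum(
        random.random() ** 2 + random.random() ** 2 <= 1.0
        for _ in range(samples)
    )
    return 4.0 * inside / samples

print(estimate_pi())  # typically within about 0.01 of math.pi at 1e5 samples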
'''simple docstring''' from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: """simple docstring""" a_ = k_size // 2 a_ , a_ = mgrid[0 - center : k_size - center, 0 - center : k_size - center] a_ = 1 / (2 * pi * sigma) * exp(-(square(_UpperCAmelCase ) + square(_UpperCAmelCase )) / (2 * square(_UpperCAmelCase )) ) return g def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: """simple docstring""" a_ , a_ = image.shape[0], image.shape[1] # dst image height and width a_ = height - k_size + 1 a_ = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows a_ = zeros((dst_height * dst_width, k_size * k_size) ) a_ = 0 for i, j in product(range(_UpperCAmelCase ) , range(_UpperCAmelCase ) ): a_ = ravel(image[i : i + k_size, j : j + k_size] ) a_ = window row += 1 # turn the kernel into shape(k*k, 1) a_ = gen_gaussian_kernel(_UpperCAmelCase , _UpperCAmelCase ) a_ = ravel(_UpperCAmelCase ) # reshape and get the dst image a_ = dot(_UpperCAmelCase , _UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ).astype(_UpperCAmelCase ) return dst if __name__ == "__main__": # read original image __lowerCAmelCase =imread(r"../image_data/lena.jpg") # turn image in gray scale value __lowerCAmelCase =cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size __lowerCAmelCase =gaussian_filter(gray, 3, sigma=1) __lowerCAmelCase =gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("gaussian filter with 3x3 mask", gaussianaxa) imshow("gaussian filter with 5x5 mask", gaussianaxa) waitKey()
697
'''simple docstring''' import unittest from transformers import DonutProcessor __lowerCAmelCase ="naver-clova-ix/donut-base" class _snake_case ( unittest.TestCase ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = { 'name': 'John Doe', 'age': '99', 'city': 'Atlanta', 'state': 'GA', 'zip': '30301', 'phone': '123-4567', 'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}], } a_ = ( '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>' '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>' '<s_nicknames><s_nickname>Johnny</s_nickname>' '<sep/><s_nickname>JD</s_nickname></s_nicknames>' ) a_ = self.processor.tokenajson(UpperCAmelCase__ ) self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
697
1
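The first sample in this row builds a Gaussian kernel on an `mgrid` lattice and convolves via im2col. A sketch of the kernel generator in plain NumPy; note the sample appears to keep a raw 1/(2*pi*sigma) prefactor and never normalizes, so its output intensity scales with sigma, whereas this sketch normalizes the kernel so blurring preserves overall brightness:

import numpy as np

def gaussian_kernel(k_size: int, sigma: float) -> np.ndarray:
    center = k_size // 2
    y, x = np.mgrid[-center : k_size - center, -center : k_size - center]
    g = np.exp(-(x**2 + y**2) / (2 * sigma**2))
    return g / g.sum()  # normalized: entries sum to 1

print(gaussian_kernel(3, sigma=1.0).round(3))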
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={ "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int: """simple docstring""" for attribute in key.split('.' ): a_ = getattr(_UpperCAmelCase , _UpperCAmelCase ) if weight_type is not None: a_ = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape else: a_ = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": a_ = value elif weight_type == "weight_g": a_ = value elif weight_type == "weight_v": a_ = value elif weight_type == "bias": a_ = value else: a_ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: """simple docstring""" a_ = [] a_ = fairseq_model.state_dict() a_ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): a_ = False if "conv_layers" in name: load_conv_layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) a_ = True else: for key, mapped_key in MAPPING.items(): a_ = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned): a_ = True if "*" in mapped_key: a_ = name.split(_UpperCAmelCase )[0].split('.' )[-2] a_ = mapped_key.replace('*' , _UpperCAmelCase ) if "weight_g" in name: a_ = 'weight_g' elif "weight_v" in name: a_ = 'weight_v' elif "weight" in name: a_ = 'weight' elif "bias" in name: a_ = 'bias' else: a_ = None set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) continue if not is_used: unused_weights.append(_UpperCAmelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str: """simple docstring""" a_ = full_name.split('conv_layers.' )[-1] a_ = name.split('.' 
) a_ = int(items[0] ) a_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) a_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_UpperCAmelCase ) @torch.no_grad() def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True ) -> List[Any]: """simple docstring""" if config_path is not None: a_ = HubertConfig.from_pretrained(_UpperCAmelCase ) else: a_ = HubertConfig() if is_finetuned: if dict_path: a_ = Dictionary.load(_UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq a_ = target_dict.pad_index a_ = target_dict.bos_index a_ = target_dict.eos_index a_ = len(target_dict.symbols ) a_ = os.path.join(_UpperCAmelCase , 'vocab.json' ) if not os.path.isdir(_UpperCAmelCase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCAmelCase ) ) return os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , _UpperCAmelCase ) a_ = WavaVecaCTCTokenizer( _UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCAmelCase , ) a_ = True if config.feat_extract_norm == 'layer' else False a_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ) a_ = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) a_ = HubertForCTC(_UpperCAmelCase ) else: a_ = HubertModel(_UpperCAmelCase ) if is_finetuned: a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) a_ = model[0].eval() recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) 
hf_wavavec.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __lowerCAmelCase =parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
697
'''simple docstring''' import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class _snake_case : """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]: a_ = parent a_ = out_indices if out_indices is not None else [4] a_ = stage_names a_ = out_features a_ = backbone a_ = batch_size a_ = image_size a_ = num_channels a_ = use_pretrained_backbone a_ = is_training def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = self.get_config() return config, pixel_values def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = TimmBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): a_ = model(UpperCAmelCase__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = self.prepare_config_and_inputs() a_ , a_ = config_and_inputs a_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = (TimmBackbone,) if is_torch_available() else () _UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = TimmBackboneModelTester(self ) a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = 'resnet18' a_ = 'microsoft/resnet-18' a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) 
self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def __SCREAMING_SNAKE_CASE ( self ) -> str: pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('Safetensors is not supported by timm.' ) def __SCREAMING_SNAKE_CASE ( self ) -> str: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) a_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = True a_ = self.has_attentions # no need to test all models as different heads yield the same functionality a_ = self.all_model_classes[0] a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = model(**UpperCAmelCase__ ) a_ = outputs[0][-1] # Encoder-/Decoder-only models a_ = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: a_ = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=UpperCAmelCase__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None a_ = copy.deepcopy(UpperCAmelCase__ ) a_ = None a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights a_ = copy.deepcopy(UpperCAmelCase__ ) a_ = False a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ )
697
1
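The conversion script in this row renames fairseq state-dict keys through a `MAPPING` table whose target names carry a `*` placeholder for the layer index. The remapping step in isolation; the single table entry is lifted from the script, the rest is an illustrative harness:

from typing import Optional

MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def remap(name: str) -> Optional[str]:
    for src, dst in MAPPING.items():
        if src in name:
            # Recover the layer index from e.g. "encoder.layers.7.self_attn.k_proj".
            layer_index = name.split(src)[0].split(".")[-2]
            return dst.replace("*", layer_index)
    return None  # unmatched keys are what the script collects as unused_weights

print(remap("encoder.layers.7.self_attn.k_proj"))
# encoder.layers.7.attention.k_proj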
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =[ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
697
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]: a_ = size if size is not None else {'height': 18, 'width': 18} a_ = parent a_ = batch_size a_ = num_channels a_ = image_size a_ = min_resolution a_ = max_resolution a_ = do_resize a_ = size a_ = apply_ocr def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _snake_case ( snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = LayoutLMvaImageProcessingTester(self ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , UpperCAmelCase__ ) self.assertIsInstance(encoding.boxes , UpperCAmelCase__ ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in 
image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> int: # with apply_OCR = True a_ = LayoutLMvaImageProcessor() from datasets import load_dataset a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) a_ = Image.open(ds[0]['file'] ).convert('RGB' ) a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 
'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 
658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCAmelCase__ ) self.assertListEqual(encoding.boxes , UpperCAmelCase__ ) # with apply_OCR = False a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
697
1
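The FocalNet sample in this row routes its public names through `_LazyModule` so the torch-heavy submodules import only when first touched. A stripped-down sketch of the same deferred-import idea; this is an illustration, not transformers' actual `_LazyModule`:

import importlib

class LazyLoader:
    """Defer importing `module_name` until an attribute is first accessed."""

    def __init__(self, module_name: str):
        self._name = module_name
        self._module = None

    def __getattr__(self, item):
        # Reached only for names regular lookup cannot find, so _name and
        # _module themselves never recurse into here.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, item)

np = LazyLoader("numpy")  # nothing imported yet
print(np.pi)              # the real import happens on this first access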
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __lowerCAmelCase ={ "vocab_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json" ), }, "merges_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt" ), }, "tokenizer_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json", "roberta-base-openai-detector": ( "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json" ), "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json" ), }, } __lowerCAmelCase ={ "roberta-base": 512, "roberta-large": 512, "roberta-large-mnli": 512, "distilroberta-base": 512, "roberta-base-openai-detector": 512, "roberta-large-openai-detector": 512, } class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ["input_ids", "attention_mask"] _UpperCamelCase = RobertaTokenizer def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="replace" , UpperCAmelCase__="<s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="<s>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__="<mask>" , UpperCAmelCase__=False , UpperCAmelCase__=True , **UpperCAmelCase__ , ) -> Dict: super().__init__( UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , 
sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , ) a_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space: a_ = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) ) a_ = add_prefix_space a_ = pre_tok_class(**UpperCAmelCase__ ) a_ = add_prefix_space a_ = 'post_processor' a_ = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ ) if tokenizer_component_instance: a_ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a_ = tuple(state['sep'] ) if "cls" in state: a_ = tuple(state['cls'] ) a_ = False if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space: a_ = add_prefix_space a_ = True if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets: a_ = trim_offsets a_ = True if changes_to_apply: a_ = getattr(UpperCAmelCase__ , state.pop('type' ) ) a_ = component_class(**UpperCAmelCase__ ) setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ ) @property def __SCREAMING_SNAKE_CASE ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Union[str, Any]: a_ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value a_ = value def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> BatchEncoding: a_ = kwargs.get('is_split_into_words' , UpperCAmelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> BatchEncoding: a_ = kwargs.get('is_split_into_words' , UpperCAmelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]: a_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Optional[Any]: a_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]: a_ = [self.sep_token_id] a_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
697
'''simple docstring''' import math def a ( _UpperCAmelCase ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def a ( _UpperCAmelCase = 1_0_0_0_1 ) -> int: """simple docstring""" try: a_ = int(_UpperCAmelCase ) except (TypeError, ValueError): raise TypeError('Parameter nth must be int or castable to int.' ) from None if nth <= 0: raise ValueError('Parameter nth must be greater than or equal to one.' ) a_ = [] a_ = 2 while len(_UpperCAmelCase ) < nth: if is_prime(_UpperCAmelCase ): primes.append(_UpperCAmelCase ) num += 1 else: num += 1 return primes[len(_UpperCAmelCase ) - 1] if __name__ == "__main__": print(f'''{solution() = }''')
697
1
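The prime search in this row restricts trial divisors to the 6k - 1 and 6k + 1 residues, since every prime above 3 has one of those forms. The test on its own, with a quick check against the primes below 30:

import math

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:                       # 2 and 3
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(n)) + 1, 6):  # i walks the 6k - 1 values
        if n % i == 0 or n % (i + 2) == 0:        # i + 2 covers 6k + 1
            return False
    return True

assert [p for p in range(30) if is_prime(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]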
'''simple docstring''' from functools import reduce __lowerCAmelCase =( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def a ( _UpperCAmelCase = N ) -> int: """simple docstring""" return max( # mypy cannot properly interpret reduce int(reduce(lambda _UpperCAmelCase , _UpperCAmelCase : str(int(_UpperCAmelCase ) * int(_UpperCAmelCase ) ) , n[i : i + 1_3] ) ) for i in range(len(_UpperCAmelCase ) - 1_2 ) ) if __name__ == "__main__": print(f'''{solution() = }''')
697
'''simple docstring''' import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _snake_case ( unittest.TestCase ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = 10 def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = [1, 2, 3, 4] a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' a_ , a_ = process_story(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , [] ) def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = '' a_ , a_ = process_story(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , [] ) self.assertEqual(UpperCAmelCase__ , [] ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) a_ , a_ = process_story(UpperCAmelCase__ ) a_ = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = ['It was the best of times.'] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = torch.tensor([1, 2, 3, 4] ) a_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() ) def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = 101 a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ ) np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
697
1
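The first sample in this row is Project Euler 8: the greatest product of 13 adjacent digits of a fixed 1000-digit constant. The same scan reads more directly as a sliding window over `math.prod`; the short digit string below is a toy stand-in for the full constant:

from math import prod

def largest_adjacent_product(digits: str, window: int = 13) -> int:
    return max(
        prod(int(d) for d in digits[i : i + window])
        for i in range(len(digits) - window + 1)
    )

# Toy check: the best 5-digit window of "3675356291" is 67535, product 3150.
assert largest_adjacent_product("3675356291", window=5) == 3150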
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase =logging.get_logger(__name__) def a ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False ) -> Dict: """simple docstring""" a_ = 'backbone.' if is_semantic else '' a_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False ) -> Optional[int]: """simple docstring""" for i in range(config.num_hidden_layers ): a_ = 'backbone.' 
if is_semantic else '' # queries, keys and values a_ = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) a_ = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) a_ = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) a_ = in_proj_weight[ : config.hidden_size, : ] a_ = q_bias a_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a_ = in_proj_weight[ -config.hidden_size :, : ] a_ = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained a_ = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) a_ = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) a_ = gamma_a a_ = gamma_a def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: """simple docstring""" a_ = dct.pop(_UpperCAmelCase ) a_ = val def a ( ) -> str: """simple docstring""" a_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' a_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) return im @torch.no_grad() def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]: """simple docstring""" a_ = False if 'rvlcdip' in checkpoint_url else True a_ = BeitConfig(use_absolute_position_embeddings=_UpperCAmelCase , use_mask_token=_UpperCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: a_ = 1_0_2_4 a_ = 4_0_9_6 a_ = 2_4 a_ = 1_6 # labels if "rvlcdip" in checkpoint_url: a_ = 1_6 a_ = 'huggingface/label-files' a_ = 'rvlcdip-id2label.json' a_ = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) a_ = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} a_ = idalabel a_ = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys a_ = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' )['model'] a_ = create_rename_keys(_UpperCAmelCase , has_lm_head=_UpperCAmelCase ) for src, dest in rename_keys: rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase , has_lm_head=_UpperCAmelCase ) # load HuggingFace model a_ = BeitForMaskedImageModeling(_UpperCAmelCase ) if has_lm_head else BeitForImageClassification(_UpperCAmelCase ) model.eval() model.load_state_dict(_UpperCAmelCase ) # Check outputs on an image a_ = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCAmelCase ) a_ = prepare_img() a_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ) a_ = encoding['pixel_values'] a_ = model(_UpperCAmelCase ) a_ = outputs.logits # verify logits a_ = [1, 1_6] if 'rvlcdip' in checkpoint_url else [1, 1_9_6, 8_1_9_2] assert logits.shape == torch.Size(_UpperCAmelCase ), "Shape of logits not as expected" Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: if has_lm_head: a_ = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: a_ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , 
organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __lowerCAmelCase =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
697
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin __lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class _snake_case ( unittest.TestCase , snake_case ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = load_tool('text-question-answering' ) self.tool.setup() a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' ) self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' ) self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' ) self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' ) self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
697
1
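The checkpoint converter in this row splits the original fused `qkv` projection into separate query, key, and value tensors by slicing along the output dimension. The slicing logic on synthetic shapes, so it runs without a real checkpoint:

import torch

hidden_size = 8
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] rows

q = qkv_weight[:hidden_size, :]
k = qkv_weight[hidden_size : 2 * hidden_size, :]
v = qkv_weight[-hidden_size:, :]

assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)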
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def a ( _UpperCAmelCase ) -> int: """simple docstring""" return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class _snake_case ( snake_case ): """simple docstring""" @staticmethod def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str: a_ = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' ) download_parser.add_argument( '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , ) download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' ) download_parser.set_defaults(func=UpperCAmelCase__ ) def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = model a_ = cache a_ = force a_ = trust_remote_code def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
697
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
697
1
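The download command row above ultimately just calls from_pretrained twice; a sketch of the equivalent programmatic calls follows, where the checkpoint name is an assumption for illustration.

from transformers import AutoModel, AutoTokenizer

# Equivalent of `transformers-cli download bert-base-uncased` per the command's
# run() method above: materialize weights and tokenizer files in the local cache.
AutoModel.from_pretrained("bert-base-uncased", force_download=False)
AutoTokenizer.from_pretrained("bert-base-uncased", force_download=False)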
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer __lowerCAmelCase =logging.get_logger(__name__) class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = "AutoTokenizer" _UpperCamelCase = ["tokenizer"] _UpperCamelCase = { "semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2, } def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Optional[int]: super().__init__(UpperCAmelCase__ ) a_ = speaker_embeddings @classmethod def __SCREAMING_SNAKE_CASE ( cls , UpperCAmelCase__ , UpperCAmelCase__="speaker_embeddings_path.json" , **UpperCAmelCase__ ) -> List[Any]: if speaker_embeddings_dict_path is not None: a_ = get_file_from_repo( UpperCAmelCase__ , UpperCAmelCase__ , subfolder=kwargs.pop('subfolder' , UpperCAmelCase__ ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase__ ) , force_download=kwargs.pop('force_download' , UpperCAmelCase__ ) , proxies=kwargs.pop('proxies' , UpperCAmelCase__ ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase__ ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase__ ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase__ ) , revision=kwargs.pop('revision' , UpperCAmelCase__ ) , ) if speaker_embeddings_path is None: logger.warning( F'''`{os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) a_ = None else: with open(UpperCAmelCase__ ) as speaker_embeddings_json: a_ = json.load(UpperCAmelCase__ ) else: a_ = None a_ = AutoTokenizer.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) return cls(tokenizer=UpperCAmelCase__ , speaker_embeddings=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__="speaker_embeddings_path.json" , UpperCAmelCase__="speaker_embeddings" , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> Dict: if self.speaker_embeddings is not None: os.makedirs(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ , 'v2' ) , exist_ok=UpperCAmelCase__ ) a_ = {} a_ = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": a_ = self._load_voice_preset(UpperCAmelCase__ ) a_ = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['repo_or_path'] , UpperCAmelCase__ , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=UpperCAmelCase__ , ) a_ = os.path.join(UpperCAmelCase__ , F'''{prompt_key}_{key}.npy''' ) a_ = tmp_dict with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , 'w' ) as fp: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) super().save_pretrained(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ = None , **UpperCAmelCase__ ) -> Tuple: a_ = self.speaker_embeddings[voice_preset] a_ = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) a_ = get_file_from_repo( self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase__ ) , 
cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase__ ) , force_download=kwargs.pop('force_download' , UpperCAmelCase__ ) , proxies=kwargs.pop('proxies' , UpperCAmelCase__ ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase__ ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase__ ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase__ ) , revision=kwargs.pop('revision' , UpperCAmelCase__ ) , ) if path is None: raise ValueError( F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' ) a_ = np.load(UpperCAmelCase__ ) return voice_preset_dict def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ = None ) -> Union[str, Any]: for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="pt" , UpperCAmelCase__=256 , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=False , **UpperCAmelCase__ , ) -> str: if voice_preset is not None and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): if ( isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): a_ = self._load_voice_preset(UpperCAmelCase__ ) else: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not voice_preset.endswith('.npz' ): a_ = voice_preset + '.npz' a_ = np.load(UpperCAmelCase__ ) if voice_preset is not None: self._validate_voice_preset_dict(UpperCAmelCase__ , **UpperCAmelCase__ ) a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) a_ = self.tokenizer( UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , padding='max_length' , max_length=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , ) if voice_preset is not None: a_ = voice_preset return encoded_text
697
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__lowerCAmelCase =logging.get_logger(__name__)

__lowerCAmelCase ={
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = "vit"

    def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
        super().__init__(**UpperCAmelCase__ )

        a_ = hidden_size
        a_ = num_hidden_layers
        a_ = num_attention_heads
        a_ = intermediate_size
        a_ = hidden_act
        a_ = hidden_dropout_prob
        a_ = attention_probs_dropout_prob
        a_ = initializer_range
        a_ = layer_norm_eps
        a_ = image_size
        a_ = patch_size
        a_ = num_channels
        a_ = qkv_bias
        a_ = encoder_stride


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = version.parse("1.11" )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> float:
        return 1e-4
697
1
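A quick check of the geometry implied by the ViT config defaults in the row above (image_size=224, patch_size=16); the [CLS]-token detail is standard ViT behavior rather than something stated in the snippet.

# A 224x224 input cut into 16x16 patches gives 14 * 14 = 196 patch tokens.
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
assert num_patches == 196
# The encoder sequence also carries one prepended [CLS] token: length 197.
assert num_patches + 1 == 197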
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowerCAmelCase ={ "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"], "configuration_data2vec_text": [ "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecTextConfig", "Data2VecTextOnnxConfig", ], "configuration_data2vec_vision": [ "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecVisionConfig", "Data2VecVisionOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =[ "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecAudioForAudioFrameClassification", "Data2VecAudioForCTC", "Data2VecAudioForSequenceClassification", "Data2VecAudioForXVector", "Data2VecAudioModel", "Data2VecAudioPreTrainedModel", ] __lowerCAmelCase =[ "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecTextForCausalLM", "Data2VecTextForMaskedLM", "Data2VecTextForMultipleChoice", "Data2VecTextForQuestionAnswering", "Data2VecTextForSequenceClassification", "Data2VecTextForTokenClassification", "Data2VecTextModel", "Data2VecTextPreTrainedModel", ] __lowerCAmelCase =[ "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecVisionForImageClassification", "Data2VecVisionForMaskedImageModeling", "Data2VecVisionForSemanticSegmentation", "Data2VecVisionModel", "Data2VecVisionPreTrainedModel", ] if is_tf_available(): __lowerCAmelCase =[ "TFData2VecVisionForImageClassification", "TFData2VecVisionForSemanticSegmentation", "TFData2VecVisionModel", "TFData2VecVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
697
'''simple docstring'''
def a ( _UpperCAmelCase = 5_0 ) -> int:
    """simple docstring"""
    a_ = [1] * (length + 1)

    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
697
1
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__=[0.5, 0.5, 0.5] , UpperCAmelCase__=[0.5, 0.5, 0.5] , UpperCAmelCase__=False , ) -> Tuple: a_ = size if size is not None else {'height': 20, 'width': 20} a_ = crop_size if crop_size is not None else {'height': 18, 'width': 18} a_ = parent a_ = batch_size a_ = num_channels a_ = image_size a_ = min_resolution a_ = max_resolution a_ = do_resize a_ = size a_ = do_center_crop a_ = crop_size a_ = do_normalize a_ = image_mean a_ = image_std a_ = do_reduce_labels def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def a ( ) -> str: """simple docstring""" a_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) a_ = Image.open(dataset[0]['file'] ) a_ = Image.open(dataset[1]['file'] ) return image, map def a ( ) -> str: """simple docstring""" a_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) a_ = Image.open(ds[0]['file'] ) a_ = Image.open(ds[1]['file'] ) a_ = Image.open(ds[2]['file'] ) a_ = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _snake_case ( snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = BeitImageProcessor if is_vision_available() else None def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = BeitImageProcessingTester(self ) @property def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: return self.image_processor_tester.prepare_image_processor_dict() def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_center_crop' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'center_crop' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'image_mean' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'image_std' ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 20, 'width': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase__ ) a_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=UpperCAmelCase__ ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) 
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: pass def __SCREAMING_SNAKE_CASE ( self ) -> Any: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) a_ = [] for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) 
maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input a_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) # Test batched a_ = image_processing(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) # Test not batched input (PIL images) a_ , a_ = prepare_semantic_single_inputs() a_ = image_processing(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) # Test batched input (PIL images) a_ , a_ = prepare_semantic_batch_inputs() a_ = image_processing(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 a_ , a_ = prepare_semantic_single_inputs() a_ = image_processing(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors='pt' ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 150 ) a_ = True a_ = image_processing(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors='pt' ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 )
697
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
    """simple docstring"""
    return int(input_a == input_a == 0 )


def a ( ) -> None:
    """simple docstring"""
    print('Truth Table of NOR Gate:' )
    print('| Input 1 | Input 2 | Output |' )
    print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
    print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
    print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
    print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
697
1
'''simple docstring'''
def a ( _UpperCAmelCase ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    a_ = 0
    a_ = len(_UpperCAmelCase ) - 1
    a_ = 0

    while index >= 0:
        a_ = (ord(column_title[index] ) - 6_4) * pow(2_6 , _UpperCAmelCase )
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
697
'''simple docstring'''
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def a ( _UpperCAmelCase ) -> int:
    """simple docstring"""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )


class _snake_case ( snake_case ):
    """simple docstring"""

    @staticmethod
    def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str:
        a_ = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be downloaded even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' ,
            action='store_true' ,
            help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,
        )
        download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' )
        download_parser.set_defaults(func=UpperCAmelCase__ )

    def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
        a_ = model
        a_ = cache
        a_ = force
        a_ = trust_remote_code

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
697
1
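A hand-worked instance of the base-26 conversion in the spreadsheet-column snippet above:

# "AB" -> (ord("A") - 64) * 26**1 + (ord("B") - 64) * 26**0 = 1 * 26 + 2 = 28
assert (ord("A") - 64) * 26 ** 1 + (ord("B") - 64) * 26 ** 0 == 28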
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__lowerCAmelCase =logging.getLogger(__name__)


def a ( ) -> str:
    """simple docstring"""
    a_ = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' )
    a_ = parser.parse_args()

    logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        a_ = BertTokenizer.from_pretrained(args.tokenizer_name )
        a_ = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        a_ = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        a_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        a_ = tokenizer.special_tokens_map['cls_token']  # `<s>`
        a_ = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        a_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        a_ = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        a_ = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`

    logger.info(F'''Loading text from {args.file_path}''' )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        a_ = fp.readlines()

    logger.info('Start encoding' )
    logger.info(F'''{len(_UpperCAmelCase )} examples to process.''' )

    a_ = []
    a_ = 0
    a_ = 1_0_0_0_0
    a_ = time.time()
    for text in data:
        a_ = F'''{bos} {text.strip()} {sep}'''
        a_ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
        rslt.append(_UpperCAmelCase )

        iter += 1
        if iter % interval == 0:
            a_ = time.time()
            logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            a_ = time.time()
    logger.info('Finished binarization' )
    logger.info(F'''{len(_UpperCAmelCase )} examples processed.''' )

    a_ = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    a_ = tokenizer.vocab_size
    if vocab_size < (1 << 1_6):
        a_ = [np.uintaa(_UpperCAmelCase ) for d in rslt]
    else:
        a_ = [np.intaa(_UpperCAmelCase ) for d in rslt]
    random.shuffle(rslt_ )

    logger.info(F'''Dump to {dp_file}''' )
    with open(_UpperCAmelCase , 'wb' ) as handle:
        pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )


if __name__ == "__main__":
    main()
697
'''simple docstring'''
def a ( _UpperCAmelCase ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    a_ = 0
    a_ = len(_UpperCAmelCase ) - 1
    a_ = 0

    while index >= 0:
        a_ = (ord(column_title[index] ) - 6_4) * pow(2_6 , _UpperCAmelCase )
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
697
1
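The binarization script above downcasts token ids to uint16 whenever the vocabulary fits in 16 bits; a small sketch of the memory effect, with made-up token ids:

import numpy as np

ids = np.array([101, 7592, 2088, 102])  # hypothetical BERT-style ids
# uint16 (2 bytes/id) halves storage relative to int32 (4 bytes/id).
assert ids.astype(np.uint16).nbytes * 2 == ids.astype(np.int32).nbytes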
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _snake_case ( snake_case ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> str: a_ = params a_ = np.array(UpperCAmelCase__ ) a_ = np.array([len(UpperCAmelCase__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , UpperCAmelCase__ ) -> str: return (self.token_ids[index], self.lengths[index]) def __len__( self ) -> List[str]: return len(self.lengths ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = self.params.max_model_input_size a_ = self.lengths > max_len logger.info(F'''Splitting {sum(UpperCAmelCase__ )} too long sequences.''' ) def divide_chunks(UpperCAmelCase__ , UpperCAmelCase__ ): return [l[i : i + n] for i in range(0 , len(UpperCAmelCase__ ) , UpperCAmelCase__ )] a_ = [] a_ = [] if self.params.mlm: a_ , a_ = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: a_ , a_ = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: a_ = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: a_ = np.insert(UpperCAmelCase__ , 0 , UpperCAmelCase__ ) if sub_s[-1] != sep_id: a_ = np.insert(UpperCAmelCase__ , len(UpperCAmelCase__ ) , UpperCAmelCase__ ) assert len(UpperCAmelCase__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(UpperCAmelCase__ ) new_tok_ids.extend(UpperCAmelCase__ ) new_lengths.extend([len(UpperCAmelCase__ ) for l in sub_seqs] ) a_ = np.array(UpperCAmelCase__ ) a_ = np.array(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = len(self ) a_ = self.lengths > 11 a_ = self.token_ids[indices] a_ = self.lengths[indices] a_ = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: if "unk_token" not in self.params.special_tok_ids: return else: a_ = self.params.special_tok_ids['unk_token'] a_ = len(self ) a_ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) a_ = (unk_occs / self.lengths) < 0.5 a_ = self.token_ids[indices] a_ = self.lengths[indices] a_ = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]: a_ = [t[0] for t in batch] a_ = [t[1] for t in batch] assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) # Max for 
paddings a_ = max(UpperCAmelCase__ ) # Pad token ids if self.params.mlm: a_ = self.params.special_tok_ids['pad_token'] else: a_ = self.params.special_tok_ids['unk_token'] a_ = [list(t.astype(UpperCAmelCase__ ) ) + [pad_idx] * (max_seq_len_ - len(UpperCAmelCase__ )) for t in token_ids] assert len(tk_ ) == len(UpperCAmelCase__ ) assert all(len(UpperCAmelCase__ ) == max_seq_len_ for t in tk_ ) a_ = torch.tensor(tk_ ) # (bs, max_seq_len_) a_ = torch.tensor(UpperCAmelCase__ ) # (bs) return tk_t, lg_t
697
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCAmelCase =logging.get_logger(__name__) class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]: super().__init__( feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , ) a_ = spectrogram_length a_ = num_channels a_ = patch_size a_ = feature_size // self.patch_size[1] a_ = n_fft a_ = sampling_rate // hop_length_to_sampling_rate a_ = sampling_rate a_ = padding_value a_ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray: a_ = spectrogram( UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , ) a_ = log_spec[:, :-1] a_ = log_spec - 2_0.0 a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) a_ = is_batched_numpy or ( isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ): a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa ) elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a_ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis a_ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , UpperCAmelCase__ ): a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask a_ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: a_ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] a_ = np.array(UpperCAmelCase__ ).astype(np.floataa ) # convert into correct format for padding a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) a_ = padded_audio_features * self.padding_value for i in range(len(UpperCAmelCase__ ) ): a_ = audio_features[i] a_ = feature # return as BatchFeature if return_attention_mask: a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: a_ = {'audio_values': padded_audio_features} a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) return encoded_inputs
697
1
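The collate method at the end of the dataset class above right-pads every sequence in a batch to the batch maximum; the same logic in miniature, with an arbitrarily chosen pad id:

batch = [[5, 6, 7, 8], [9, 10]]
pad_idx = 0
max_seq_len = max(len(seq) for seq in batch)
padded = [seq + [pad_idx] * (max_seq_len - len(seq)) for seq in batch]
assert padded == [[5, 6, 7, 8], [9, 10, 0, 0]]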
'''simple docstring'''
def a ( _UpperCAmelCase = 1_0**9 ) -> int:
    """simple docstring"""
    a_ = 1
    a_ = 2

    a_ = 0
    a_ = 0
    a_ = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        a_ = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f'''{solution() = }''')
697
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]:
    """simple docstring"""
    if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
        a_ = len(set_a.intersection(_UpperCAmelCase ) )

        if alternative_union:
            a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
        else:
            a_ = len(set_a.union(_UpperCAmelCase ) )

        return intersection / union

    if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(_UpperCAmelCase , (list, tuple) ):
        a_ = [element for element in set_a if element in set_b]

        if alternative_union:
            a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
            return len(_UpperCAmelCase ) / union
        else:
            a_ = set_a + [element for element in set_b if element not in set_a]
            return len(_UpperCAmelCase ) / len(_UpperCAmelCase )

        return len(_UpperCAmelCase ) / len(_UpperCAmelCase )

    return None


if __name__ == "__main__":
    __lowerCAmelCase ={"a", "b", "c", "d", "e"}
    __lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
697
1
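Working the __main__ example of the Jaccard snippet above by hand:

set_a = {"a", "b", "c", "d", "e"}
set_b = {"c", "d", "e", "f", "h", "i"}
# The intersection {"c", "d", "e"} has 3 elements and the union has 8,
# so the similarity is 3 / 8 = 0.375.
assert len(set_a & set_b) / len(set_a | set_b) == 0.375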
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __lowerCAmelCase =trt.Logger(trt.Logger.WARNING) __lowerCAmelCase =absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __lowerCAmelCase =logging.getLogger(__name__) __lowerCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--onnx_model_path", default=None, type=str, required=True, help="Path to ONNX model: ", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--tokenizer_name", default="", type=str, required=True, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." ), ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--dataset_name", type=str, default=None, required=True, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data." 
) parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision instead of 32-bit", ) parser.add_argument( "--int8", action="store_true", help="Whether to use INT8", ) __lowerCAmelCase =parser.parse_args() if args.tokenizer_name: __lowerCAmelCase =AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) logger.info("Training/evaluation parameters %s", args) __lowerCAmelCase =args.per_device_eval_batch_size __lowerCAmelCase =(args.eval_batch_size, args.max_seq_length) # TRT Engine properties __lowerCAmelCase =True __lowerCAmelCase ="temp_engine/bert-fp32.engine" if args.fpaa: __lowerCAmelCase ="temp_engine/bert-fp16.engine" if args.inta: __lowerCAmelCase ="temp_engine/bert-int8.engine" # import ONNX file if not os.path.exists("temp_engine"): os.makedirs("temp_engine") __lowerCAmelCase =1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, "rb") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __lowerCAmelCase =[network.get_input(i) for i in range(network.num_inputs)] __lowerCAmelCase =[_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __lowerCAmelCase =1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __lowerCAmelCase =builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __lowerCAmelCase =builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, "wb") as f: f.write(engine.serialize()) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: """simple docstring""" a_ = np.asarray(inputs['input_ids'] , dtype=np.intaa ) a_ = np.asarray(inputs['attention_mask'] , dtype=np.intaa ) a_ = np.asarray(inputs['token_type_ids'] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _UpperCAmelCase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _UpperCAmelCase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _UpperCAmelCase ) # start time a_ = time.time() # Run inference context.execute_async( bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) cuda.memcpy_dtoh_async(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Synchronize the stream and take time stream.synchronize() # end time a_ = time.time() a_ = end_time - start_time a_ = (h_outputa, h_outputa) # 
print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __lowerCAmelCase =Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCAmelCase =load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("Evaluation requires a dataset name") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __lowerCAmelCase =raw_datasets["validation"].column_names __lowerCAmelCase ="question" if "question" in column_names else column_names[0] __lowerCAmelCase ="context" if "context" in column_names else column_names[1] __lowerCAmelCase ="answers" if "answers" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __lowerCAmelCase =tokenizer.padding_side == "right" if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __lowerCAmelCase =min(args.max_seq_length, tokenizer.model_max_length) def a ( _UpperCAmelCase ) -> Union[str, Any]: """simple docstring""" a_ = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. 
a_ = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=_UpperCAmelCase , stride=args.doc_stride , return_overflowing_tokens=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , padding='max_length' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. a_ = tokenized_examples.pop('overflow_to_sample_mapping' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. a_ = [] for i in range(len(tokenized_examples['input_ids'] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). a_ = tokenized_examples.sequence_ids(_UpperCAmelCase ) a_ = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. a_ = sample_mapping[i] tokenized_examples["example_id"].append(examples['id'][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. a_ = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['offset_mapping'][i] ) ] return tokenized_examples __lowerCAmelCase =raw_datasets["validation"] # Validation Feature Creation __lowerCAmelCase =eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on validation dataset", ) __lowerCAmelCase =default_data_collator __lowerCAmelCase =eval_dataset.remove_columns(["example_id", "offset_mapping"]) __lowerCAmelCase =DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="eval" ) -> Tuple: """simple docstring""" a_ = postprocess_qa_predictions( examples=_UpperCAmelCase , features=_UpperCAmelCase , predictions=_UpperCAmelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_UpperCAmelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: a_ = [ {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items() ] else: a_ = [{'id': k, 'prediction_text': v} for k, v in predictions.items()] a_ = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=_UpperCAmelCase , label_ids=_UpperCAmelCase ) __lowerCAmelCase =load_metric("squad_v2" if args.version_2_with_negative else "squad") # Evaluation! 
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path) with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def a ( _UpperCAmelCase ) -> Union[str, Any]: """simple docstring""" return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize # Allocate device memory for inputs and outputs. __lowerCAmelCase =[cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __lowerCAmelCase =cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __lowerCAmelCase =cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __lowerCAmelCase =cuda.mem_alloc(h_outputa.nbytes) __lowerCAmelCase =cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __lowerCAmelCase =cuda.Stream() # Evaluation logger.info("***** Running Evaluation *****") logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') __lowerCAmelCase =0.0 __lowerCAmelCase =0 __lowerCAmelCase =timeit.default_timer() __lowerCAmelCase =None for step, batch in enumerate(eval_dataloader): __lowerCAmelCase , __lowerCAmelCase =model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __lowerCAmelCase , __lowerCAmelCase =outputs __lowerCAmelCase =torch.tensor(start_logits) __lowerCAmelCase =torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __lowerCAmelCase =accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __lowerCAmelCase =accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __lowerCAmelCase =(accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __lowerCAmelCase =logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __lowerCAmelCase =nested_truncate(all_preds, len(eval_dataset)) __lowerCAmelCase =timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter)) logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000)) logger.info("Total Number of Inference = %d", niter) __lowerCAmelCase =post_processing_function(eval_examples, eval_dataset, all_preds) __lowerCAmelCase =metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
697
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
    """simple docstring"""
    if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
        raise ValueError('iterations must be defined as integers' )

    if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
        raise ValueError(
            'starting number must be an integer and be more than 0' )

    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )

    a_ = ''

    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"

        if 0 not in (number % 3, number % 5):
            out += str(_UpperCAmelCase )

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
697
1
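Tracing the FizzBuzz helper above for numbers 1 through 7 (each term is followed by a trailing space, matching the snippet's `out += " "`):

out = ""
number = 1
while number <= 7:
    if number % 3 == 0:
        out += "Fizz"
    if number % 5 == 0:
        out += "Buzz"
    if 0 not in (number % 3, number % 5):
        out += str(number)
    number += 1
    out += " "
assert out == "1 2 Fizz 4 Buzz Fizz 7 "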
'''simple docstring'''
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

__lowerCAmelCase =logging.get_logger(__name__)


@add_end_docstrings(snake_case )
class _snake_case ( snake_case ):
    """simple docstring"""

    def __init__( self , **UpperCAmelCase__ ) -> Any:
        super().__init__(**UpperCAmelCase__ )
        requires_backends(self , 'vision' )
        requires_backends(self , 'torch' )

        if self.framework != "pt":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )

        self.check_model_type(UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> Tuple:
        a_ = {}
        a_ = {}
        a_ = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            a_ = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            a_ = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            a_ = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            a_ = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            a_ = kwargs['crop_n_points_downscale_factor']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            a_ = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            a_ = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            a_ = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            a_ = kwargs['stability_score_thresh']
        if "crops_nms_thresh" in kwargs:
            a_ = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            a_ = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            a_ = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__( self , UpperCAmelCase__ , *UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , **UpperCAmelCase__ ) -> int:
        return super().__call__(UpperCAmelCase__ , *UpperCAmelCase__ , num_workers=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , **UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=64 , UpperCAmelCase__ = 0 , UpperCAmelCase__ = 512 / 1500 , UpperCAmelCase__ = 32 , UpperCAmelCase__ = 1 , ) -> str:
        a_ = load_image(UpperCAmelCase__ )
        a_ = self.image_processor.size['longest_edge']
        a_ , a_ , a_ , a_ = self.image_processor.generate_crop_boxes(
            UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        a_ = self.image_processor(images=UpperCAmelCase__ , return_tensors='pt' )

        with self.device_placement():
            if self.framework == "pt":
                a_ = self.get_inference_context()
                with inference_context():
                    a_ = self._ensure_tensor_on_device(UpperCAmelCase__ , device=self.device )
                    a_ = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
                    a_ = image_embeddings

        a_ = grid_points.shape[1]
        a_ = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
                'To return all points at once, set points_per_batch to None' )

        for i in range(0 , UpperCAmelCase__ , UpperCAmelCase__ ):
            a_ = grid_points[:, i : i + points_per_batch, :, :]
            a_ = input_labels[:, i : i + points_per_batch]
            a_ = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=0.8_8 , UpperCAmelCase__=0.9_5 , UpperCAmelCase__=0 , UpperCAmelCase__=1 , ) -> Any:
        a_ = model_inputs.pop('input_boxes' )
        a_ = model_inputs.pop('is_last' )
        a_ = model_inputs.pop('original_sizes' ).tolist()
        a_ = model_inputs.pop('reshaped_input_sizes' ).tolist()

        a_ = self.model(**UpperCAmelCase__ )

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        a_ = model_outputs['pred_masks']
        a_ = self.image_processor.post_process_masks(
            UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , binarize=UpperCAmelCase__ )
        a_ = model_outputs['iou_scores']
        a_ , a_ , a_ = self.image_processor.filter_masks(
            masks[0] ,
            iou_scores[0] ,
            original_sizes[0] ,
            input_boxes[0] ,
            UpperCAmelCase__ ,
            UpperCAmelCase__ ,
            UpperCAmelCase__ ,
            UpperCAmelCase__ ,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=False , UpperCAmelCase__=False , UpperCAmelCase__=0.7 , ) -> str:
        a_ = []
        a_ = []
        a_ = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('iou_scores' ) )
            all_masks.extend(model_output.pop('masks' ) )
            all_boxes.append(model_output.pop('boxes' ) )

        a_ = torch.cat(UpperCAmelCase__ )
        a_ = torch.cat(UpperCAmelCase__ )
        a_ , a_ , a_ , a_ = self.image_processor.post_process_for_mask_generation(
            UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

        a_ = defaultdict(UpperCAmelCase__ )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(UpperCAmelCase__ )

        a_ = {}
        if output_rle_mask:
            a_ = rle_mask
        if output_bboxes_mask:
            a_ = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
697
'''simple docstring'''
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class _snake_case ( snake_case ):
    """simple docstring"""

    def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> str:
        super().__init__(
            features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , )
        a_ = Generator(
            cache_dir=UpperCAmelCase__ , features=UpperCAmelCase__ , generator=UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        # Build iterable dataset
        if self.streaming:
            a_ = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            a_ = None
            a_ = None
            a_ = None
            a_ = None
            self.builder.download_and_prepare(
                download_config=UpperCAmelCase__ , download_mode=UpperCAmelCase__ , verification_mode=UpperCAmelCase__ , base_path=UpperCAmelCase__ , num_proc=self.num_proc , )
            a_ = self.builder.as_dataset(
                split='train' , verification_mode=UpperCAmelCase__ , in_memory=self.keep_in_memory )
        return dataset
697
1
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __lowerCAmelCase =( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) __lowerCAmelCase =( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) __lowerCAmelCase =( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) __lowerCAmelCase =( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) __lowerCAmelCase =( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) __lowerCAmelCase =( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), 
("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) __lowerCAmelCase =( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def a ( ) -> List[Any]: """simple docstring""" a_ , a_ = randrange(len(_UpperCAmelCase ) ), randrange(len(_UpperCAmelCase ) ) a_ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] a_ , a_ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def a ( _UpperCAmelCase = 1_0_0 ) -> int: """simple docstring""" return (generate_random_hand() for _ in range(_UpperCAmelCase )) @pytest.mark.parametrize('hand, expected' , _UpperCAmelCase ) def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str: """simple docstring""" assert PokerHand(_UpperCAmelCase )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , _UpperCAmelCase ) def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: """simple docstring""" assert PokerHand(_UpperCAmelCase )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , _UpperCAmelCase ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: """simple docstring""" a_ = PokerHand(_UpperCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , _UpperCAmelCase ) def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int: """simple docstring""" assert PokerHand(_UpperCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , _UpperCAmelCase ) def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str: """simple docstring""" assert PokerHand(_UpperCAmelCase )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , _UpperCAmelCase ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str: """simple docstring""" assert PokerHand(_UpperCAmelCase ).compare_with(PokerHand(_UpperCAmelCase ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: """simple docstring""" assert PokerHand(_UpperCAmelCase ).compare_with(PokerHand(_UpperCAmelCase ) ) == expected def a ( ) -> Any: """simple docstring""" a_ = [PokerHand(_UpperCAmelCase ) for hand in SORTED_HANDS] a_ = poker_hands.copy() shuffle(_UpperCAmelCase ) a_ = chain(sorted(_UpperCAmelCase ) ) for index, hand in enumerate(_UpperCAmelCase ): assert hand == poker_hands[index] def a ( ) -> List[str]: """simple docstring""" a_ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=_UpperCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def a ( ) -> Tuple: """simple docstring""" a_ = PokerHand('2C 4S AS 3D 5C' ) a_ = True a_ = [5, 4, 3, 2, 1_4] for _ in range(1_0 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def a ( ) -> str: """simple docstring""" a_ = 0 a_ = os.path.abspath(os.path.dirname(_UpperCAmelCase ) ) a_ = os.path.join(_UpperCAmelCase , 'poker_hands.txt' ) with open(_UpperCAmelCase ) as file_hand: for line in file_hand: a_ = line[:1_4].strip() a_ = line[1_5:].strip() a_ , a_ = PokerHand(_UpperCAmelCase ), PokerHand(_UpperCAmelCase ) a_ = player.compare_with(_UpperCAmelCase ) if output == "Win": answer += 1 assert answer == 3_7_6
697
'''simple docstring'''
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
__lowerCAmelCase =numpy.array([0, 0])
__lowerCAmelCase =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase =numpy.array([1, 0])

__lowerCAmelCase =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def a ( _UpperCAmelCase , _UpperCAmelCase ) -> list[numpy.ndarray]:
    """simple docstring"""
    a_ = initial_vectors
    for _ in range(_UpperCAmelCase ):
        a_ = iteration_step(_UpperCAmelCase )
    return vectors


def a ( _UpperCAmelCase ) -> list[numpy.ndarray]:
    """simple docstring"""
    a_ = []
    for i, start_vector in enumerate(vectors[:-1] ):
        a_ = vectors[i + 1]
        new_vectors.append(_UpperCAmelCase )
        a_ = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors


def a ( _UpperCAmelCase , _UpperCAmelCase ) -> numpy.ndarray:
    """simple docstring"""
    a_ = numpy.radians(_UpperCAmelCase )
    a_ , a_ = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase )
    a_ = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(_UpperCAmelCase , _UpperCAmelCase )


def a ( _UpperCAmelCase ) -> None:
    """simple docstring"""
    a_ = plt.gca()
    axes.set_aspect('equal' )

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    a_ , a_ = zip(*_UpperCAmelCase )
    plt.plot(_UpperCAmelCase , _UpperCAmelCase )
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    __lowerCAmelCase =iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
697
1
'''simple docstring'''
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class _snake_case ( unittest.TestCase ):
    """simple docstring"""

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        a_ = 10

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = [1, 2, 3, 4]
        a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        a_ , a_ = process_story(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , [] )

    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        a_ = ''
        a_ , a_ = process_story(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , [] )
        self.assertEqual(UpperCAmelCase__ , [] )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        a_ , a_ = process_story(UpperCAmelCase__ )

        a_ = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

        a_ = ['It was the best of times.']
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = torch.tensor([1, 2, 3, 4] )
        a_ = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )

    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        a_ = 101
        a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )

        a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
        np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
697
'''simple docstring'''
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def a ( _UpperCAmelCase ) -> int:
    """simple docstring"""
    if (
        (cp >= 0X4_e00 and cp <= 0X9_fff)
        or (cp >= 0X3_400 and cp <= 0X4_dbf)  #
        or (cp >= 0X20_000 and cp <= 0X2a_6df)  #
        or (cp >= 0X2a_700 and cp <= 0X2b_73f)  #
        or (cp >= 0X2b_740 and cp <= 0X2b_81f)  #
        or (cp >= 0X2b_820 and cp <= 0X2c_eaf)  #
        or (cp >= 0Xf_900 and cp <= 0Xf_aff)
        or (cp >= 0X2f_800 and cp <= 0X2f_a1f)  #
    ):  #
        return True

    return False


def a ( _UpperCAmelCase ) -> Union[str, Any]:
    """simple docstring"""
    for char in word:
        a_ = ord(_UpperCAmelCase )
        if not _is_chinese_char(_UpperCAmelCase ):
            return 0
    return 1


def a ( _UpperCAmelCase ) -> Tuple:
    """simple docstring"""
    a_ = set()

    for token in tokens:
        a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
        if chinese_word:
            word_set.add(_UpperCAmelCase )
    a_ = list(_UpperCAmelCase )
    return word_list


def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] )

    a_ = bert_tokens
    a_ , a_ = 0, len(_UpperCAmelCase )
    while start < end:
        a_ = True
        if is_chinese(bert_word[start] ):
            a_ = min(end - start , _UpperCAmelCase )
            for i in range(_UpperCAmelCase , 1 , -1 ):
                a_ = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        a_ = '##' + bert_word[j]
                    a_ = start + i
                    a_ = False
                    break
        if single_word:
            start += 1
    return bert_word


def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
    """simple docstring"""
    a_ = []

    for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
        a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
        a_ = [get_chinese_word(_UpperCAmelCase ) for r in res]
        ltp_res.extend(_UpperCAmelCase )
    assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )

    a_ = []
    for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
        a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 )
        bert_res.extend(res['input_ids'] )
    assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )

    a_ = []
    for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
        a_ = []
        for id in input_ids:
            a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
            input_tokens.append(_UpperCAmelCase )
        a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
        a_ = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(_UpperCAmelCase ):
            if token[:2] == "##":
                a_ = token[2:]
                # save chinese tokens' pos
                if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
                    ref_id.append(_UpperCAmelCase )
        ref_ids.append(_UpperCAmelCase )

    assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )

    return ref_ids


def a ( _UpperCAmelCase ) -> Optional[Any]:
    """simple docstring"""
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        a_ = f.readlines()
    a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    a_ = LTP(args.ltp )  # faster in GPU device
    a_ = BertTokenizer.from_pretrained(args.bert )

    a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
        f.writelines(_UpperCAmelCase )


if __name__ == "__main__":
    __lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file to process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    __lowerCAmelCase =parser.parse_args()
    main(args)
697
1
'''simple docstring'''
from itertools import product


def a ( _UpperCAmelCase , _UpperCAmelCase ) -> list[int]:
    """simple docstring"""
    a_ = sides_number
    a_ = max_face_number * dice_number
    a_ = [0] * (max_total + 1)

    a_ = 1
    a_ = range(_UpperCAmelCase , max_face_number + 1 )
    for dice_numbers in product(_UpperCAmelCase , repeat=_UpperCAmelCase ):
        a_ = sum(_UpperCAmelCase )
        totals_frequencies[total] += 1

    return totals_frequencies


def a ( ) -> float:
    """simple docstring"""
    a_ = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    a_ = total_frequency_distribution(
        sides_number=6 , dice_number=6 )

    a_ = 0
    a_ = 9
    a_ = 4 * 9
    a_ = 6
    for peter_total in range(_UpperCAmelCase , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )

    a_ = (4**9) * (6**6)
    a_ = peter_wins_count / total_games_number

    a_ = round(_UpperCAmelCase , ndigits=7 )

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f'''{solution() = }''')
697
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


__lowerCAmelCase =logging.get_logger(__name__)

__lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

__lowerCAmelCase ={
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

__lowerCAmelCase ={
    "yjernite/retribert-base-uncased": 512,
}

__lowerCAmelCase ={
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = VOCAB_FILES_NAMES
    _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
    _UpperCamelCase = RetriBertTokenizer
    _UpperCamelCase = ["input_ids", "attention_mask"]

    def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__="[UNK]" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="[PAD]" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[MASK]" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> int:
        super().__init__(
            UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )

        a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
            or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
        ):
            a_ = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
            a_ = do_lower_case
            a_ = strip_accents
            a_ = tokenize_chinese_chars
            a_ = normalizer_class(**UpperCAmelCase__ )

        a_ = do_lower_case

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> str:
        a_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
        a_ = [self.sep_token_id]
        a_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
        a_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
        return tuple(UpperCAmelCase__ )
697
1
'''simple docstring'''
from math import factorial, radians


def a ( _UpperCAmelCase , _UpperCAmelCase = 1_8 , _UpperCAmelCase = 1_0 ) -> float:
    """simple docstring"""
    a_ = angle_in_degrees - ((angle_in_degrees // 3_6_0.0) * 3_6_0.0)

    # Converting from degrees to radians
    a_ = radians(_UpperCAmelCase )

    a_ = angle_in_radians
    a_ = 3
    a_ = -1

    for _ in range(_UpperCAmelCase ):
        result += (b * (angle_in_radians**a)) / factorial(_UpperCAmelCase )

        a_ = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(_UpperCAmelCase , _UpperCAmelCase )


if __name__ == "__main__":
    __import__("doctest").testmod()
697
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase =logging.get_logger(__name__)

__lowerCAmelCase ={
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = "lilt"

    def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]:
        super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )

        a_ = vocab_size
        a_ = hidden_size
        a_ = num_hidden_layers
        a_ = num_attention_heads
        a_ = hidden_act
        a_ = intermediate_size
        a_ = hidden_dropout_prob
        a_ = attention_probs_dropout_prob
        a_ = max_position_embeddings
        a_ = type_vocab_size
        a_ = initializer_range
        a_ = layer_norm_eps
        a_ = position_embedding_type
        a_ = classifier_dropout
        a_ = channel_shrink_ratio
        a_ = max_ad_position_embeddings
697
1
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    __lowerCAmelCase =True
except (ImportError, AttributeError):
    __lowerCAmelCase =object

    def a ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
        """simple docstring"""
        pass

    __lowerCAmelCase =False

__lowerCAmelCase =logging.get_logger("transformers-cli/serving")


def a ( _UpperCAmelCase ) -> Optional[int]:
    """simple docstring"""
    a_ = pipeline(
        task=args.task ,
        model=args.model if args.model else None ,
        config=args.config ,
        tokenizer=args.tokenizer ,
        device=args.device ,
    )
    return ServeCommand(_UpperCAmelCase , args.host , args.port , args.workers )


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = 42


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = 42
    _UpperCamelCase = 42


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = 42


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = 42


class _snake_case ( snake_case ):
    """simple docstring"""

    @staticmethod
    def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> Optional[int]:
        a_ = parser.add_parser(
            'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
        serve_parser.add_argument(
            '--task' ,
            type=UpperCAmelCase__ ,
            choices=get_supported_tasks() ,
            help='The task to run the pipeline on' ,
        )
        serve_parser.add_argument('--host' , type=UpperCAmelCase__ , default='localhost' , help='Interface the server will listen on.' )
        serve_parser.add_argument('--port' , type=UpperCAmelCase__ , default=8888 , help='Port the serving will listen to.' )
        serve_parser.add_argument('--workers' , type=UpperCAmelCase__ , default=1 , help='Number of http workers' )
        serve_parser.add_argument('--model' , type=UpperCAmelCase__ , help='Model\'s name or path to stored model.' )
        serve_parser.add_argument('--config' , type=UpperCAmelCase__ , help='Model\'s config name or path to stored model.' )
        serve_parser.add_argument('--tokenizer' , type=UpperCAmelCase__ , help='Tokenizer name to use.' )
        serve_parser.add_argument(
            '--device' ,
            type=UpperCAmelCase__ ,
            default=-1 ,
            help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' ,
        )
        serve_parser.set_defaults(func=UpperCAmelCase__ )

    def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
        a_ = pipeline

        a_ = host
        a_ = port
        a_ = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                'Or install FastAPI and uvicorn separately.'
            )
        else:
            logger.info(F'''Serving model over {host}:{port}''' )
            a_ = FastAPI(
                routes=[
                    APIRoute(
                        '/' ,
                        self.model_info ,
                        response_model=UpperCAmelCase__ ,
                        response_class=UpperCAmelCase__ ,
                        methods=['GET'] ,
                    ),
                    APIRoute(
                        '/tokenize' ,
                        self.tokenize ,
                        response_model=UpperCAmelCase__ ,
                        response_class=UpperCAmelCase__ ,
                        methods=['POST'] ,
                    ),
                    APIRoute(
                        '/detokenize' ,
                        self.detokenize ,
                        response_model=UpperCAmelCase__ ,
                        response_class=UpperCAmelCase__ ,
                        methods=['POST'] ,
                    ),
                    APIRoute(
                        '/forward' ,
                        self.forward ,
                        response_model=UpperCAmelCase__ ,
                        response_class=UpperCAmelCase__ ,
                        methods=['POST'] ,
                    ),
                ] ,
                timeout=600 ,
            )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        run(self._app , host=self.host , port=self.port , workers=self.workers )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ = Body(UpperCAmelCase__ , embed=UpperCAmelCase__ ) , UpperCAmelCase__ = Body(UpperCAmelCase__ , embed=UpperCAmelCase__ ) ) -> int:
        try:
            a_ = self._pipeline.tokenizer.tokenize(UpperCAmelCase__ )

            if return_ids:
                a_ = self._pipeline.tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
                return ServeTokenizeResult(tokens=UpperCAmelCase__ , tokens_ids=UpperCAmelCase__ )
            else:
                return ServeTokenizeResult(tokens=UpperCAmelCase__ )

        except Exception as e:
            raise HTTPException(status_code=500 , detail={'model': '', 'error': str(UpperCAmelCase__ )} )

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ = Body(UpperCAmelCase__ , embed=UpperCAmelCase__ ) , UpperCAmelCase__ = Body(UpperCAmelCase__ , embed=UpperCAmelCase__ ) , UpperCAmelCase__ = Body(UpperCAmelCase__ , embed=UpperCAmelCase__ ) , ) -> Optional[int]:
        try:
            a_ = self._pipeline.tokenizer.decode(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
            return ServeDeTokenizeResult(model='' , text=UpperCAmelCase__ )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'model': '', 'error': str(UpperCAmelCase__ )} )

    async def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__=Body(UpperCAmelCase__ , embed=UpperCAmelCase__ ) ) -> Optional[int]:
        # Check we don't have empty string
        if len(UpperCAmelCase__ ) == 0:
            return ServeForwardResult(output=[] , attention=[] )

        try:
            # Forward through the model
            a_ = self._pipeline(UpperCAmelCase__ )
            return ServeForwardResult(output=UpperCAmelCase__ )
        except Exception as e:
            raise HTTPException(500 , {'error': str(UpperCAmelCase__ )} )
697
'''simple docstring'''
from __future__ import annotations


def a ( _UpperCAmelCase ) -> bool:
    """simple docstring"""
    a_ = len(_UpperCAmelCase )
    # We need to create a solution object to save the path.
    a_ = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )]
    a_ = run_maze(_UpperCAmelCase , 0 , 0 , _UpperCAmelCase )
    if solved:
        print('\n'.join(str(_UpperCAmelCase ) for row in solutions ) )
    else:
        print('No solution exists!' )
    return solved


def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
    """simple docstring"""
    a_ = len(_UpperCAmelCase )
    # Final check point.
    if i == j == (size - 1):
        a_ = 1
        return True

    a_ = (not i < 0) and (not j < 0)  # Check lower bounds
    a_ = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and blocked points.
        a_ = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            a_ = 1

            # check for directions
            if (
                run_maze(_UpperCAmelCase , i + 1 , _UpperCAmelCase , _UpperCAmelCase )
                or run_maze(_UpperCAmelCase , _UpperCAmelCase , j + 1 , _UpperCAmelCase )
                or run_maze(_UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase )
                or run_maze(_UpperCAmelCase , _UpperCAmelCase , j - 1 , _UpperCAmelCase )
            ):
                return True

            a_ = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
697
1
'''simple docstring'''
import enum
import shutil
import sys

__lowerCAmelCase , __lowerCAmelCase =shutil.get_terminal_size()
__lowerCAmelCase ={"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class _snake_case ( enum.Enum ):
    """simple docstring"""

    _UpperCamelCase = 0
    _UpperCamelCase = 1


def a ( _UpperCAmelCase , _UpperCAmelCase="" ) -> Tuple:
    """simple docstring"""
    sys.stdout.write(str(_UpperCAmelCase ) + end )
    sys.stdout.flush()


def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="" ) -> Union[str, Any]:
    """simple docstring"""
    forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , _UpperCAmelCase )


def a ( ) -> List[Any]:
    """simple docstring"""
    forceWrite('\r' )


def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
    """simple docstring"""
    forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )


def a ( ) -> List[str]:
    """simple docstring"""
    forceWrite(' ' * TERMINAL_WIDTH )
    reset_cursor()


def a ( ) -> Dict:
    """simple docstring"""
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH )
697
'''simple docstring'''
__lowerCAmelCase ={
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
__lowerCAmelCase ={
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float:
    """simple docstring"""
    a_ = from_type.lower().strip('s' )
    a_ = to_type.lower().strip('s' )

    a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )
    a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )

    if from_sanitized not in METRIC_CONVERSION:
        a_ = (
            F'''Invalid \'from_type\' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
        )
        raise ValueError(_UpperCAmelCase )
    if to_sanitized not in METRIC_CONVERSION:
        a_ = (
            F'''Invalid \'to_type\' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
        )
        raise ValueError(_UpperCAmelCase )

    a_ = METRIC_CONVERSION[from_sanitized]
    a_ = METRIC_CONVERSION[to_sanitized]
    a_ = 1

    if from_exponent > to_exponent:
        a_ = from_exponent - to_exponent
    else:
        a_ = -(to_exponent - from_exponent)

    return value * pow(1_0 , _UpperCAmelCase )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
697
1
'''simple docstring'''
import re


def a ( _UpperCAmelCase ) -> str:
    """simple docstring"""
    if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
        raise ValueError('Invalid Strand' )

    return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
697
'''simple docstring'''
import unittest

from transformers import DonutProcessor


__lowerCAmelCase ="naver-clova-ix/donut-base"


class _snake_case ( unittest.TestCase ):
    """simple docstring"""

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        a_ = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }

        a_ = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )

        a_ = self.processor.tokenajson(UpperCAmelCase__ )

        self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
697
1
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


__lowerCAmelCase =["bert-base-uncased", "bert-base-cased"]
__lowerCAmelCase ="hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class _snake_case ( tf.keras.Model ):
        """simple docstring"""

        def __init__( self , UpperCAmelCase__ ) -> Optional[int]:
            super().__init__()
            a_ = tokenizer
            a_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
            a_ = TFAutoModel.from_config(UpperCAmelCase__ )

        def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Any:
            a_ = self.tokenizer(UpperCAmelCase__ )
            a_ = self.bert(**UpperCAmelCase__ )
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class _snake_case ( unittest.TestCase ):
    """simple docstring"""

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        super().setUp()

        a_ = [
            BertTokenizer.from_pretrained(UpperCAmelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        a_ = [TFBertTokenizer.from_pretrained(UpperCAmelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(UpperCAmelCase__ , use_fast_bert_tokenizer=UpperCAmelCase__ )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )

        a_ = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        a_ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                a_ = tokenizer(UpperCAmelCase__ , return_tensors='tf' , padding='longest' )
                a_ = tf_tokenizer(UpperCAmelCase__ )

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )

    @slow
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        for tf_tokenizer in self.tf_tokenizers:
            a_ = tf_tokenizer(self.paired_sentences )
            a_ = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] ,
                text_pair=[sentence[1] for sentence in self.paired_sentences] ,
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )

    @slow
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        for tf_tokenizer in self.tf_tokenizers:
            a_ = tf.function(UpperCAmelCase__ )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                a_ = tf.constant(UpperCAmelCase__ )
                a_ = compiled_tokenizer(UpperCAmelCase__ )
                a_ = tf_tokenizer(UpperCAmelCase__ )

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        for tf_tokenizer in self.tf_tokenizers:
            a_ = ModelToSave(tokenizer=UpperCAmelCase__ )
            a_ = tf.convert_to_tensor(self.test_sentences )
            a_ = model(UpperCAmelCase__ )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                a_ = Path(UpperCAmelCase__ ) / 'saved.model'
                model.save(UpperCAmelCase__ )
                a_ = tf.keras.models.load_model(UpperCAmelCase__ )
            a_ = loaded_model(UpperCAmelCase__ )
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
697
'''simple docstring''' import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class _snake_case : """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]: a_ = parent a_ = out_indices if out_indices is not None else [4] a_ = stage_names a_ = out_features a_ = backbone a_ = batch_size a_ = image_size a_ = num_channels a_ = use_pretrained_backbone a_ = is_training def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = self.get_config() return config, pixel_values def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = TimmBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): a_ = model(UpperCAmelCase__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = self.prepare_config_and_inputs() a_ , a_ = config_and_inputs a_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = (TimmBackbone,) if is_torch_available() else () _UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = TimmBackboneModelTester(self ) a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = 'resnet18' a_ = 'microsoft/resnet-18' a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) 
self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def __SCREAMING_SNAKE_CASE ( self ) -> str: pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('Safetensors is not supported by timm.' ) def __SCREAMING_SNAKE_CASE ( self ) -> str: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) a_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = True a_ = self.has_attentions # no need to test all models as different heads yield the same functionality a_ = self.all_model_classes[0] a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = model(**UpperCAmelCase__ ) a_ = outputs[0][-1] # Encoder-/Decoder-only models a_ = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: a_ = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=UpperCAmelCase__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None a_ = copy.deepcopy(UpperCAmelCase__ ) a_ = None a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights a_ = copy.deepcopy(UpperCAmelCase__ ) a_ = False a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ )
697
1
'''simple docstring'''
import argparse
import json

from tqdm import tqdm


def a ( ) -> Tuple:
    """simple docstring"""
    a_ = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path' ,
        type=_UpperCAmelCase ,
        default='biencoder-nq-dev.json' ,
        help='Path to raw DPR training data' ,
    )
    parser.add_argument(
        '--evaluation_set' ,
        type=_UpperCAmelCase ,
        help='where to store parsed evaluation_set file' ,
    )
    parser.add_argument(
        '--gold_data_path' ,
        type=_UpperCAmelCase ,
        help='where to store parsed gold_data_path file' ,
    )
    a_ = parser.parse_args()

    with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
        args.gold_data_path , 'w'
    ) as gold_file:
        a_ = json.load(_UpperCAmelCase )
        for dpr_record in tqdm(_UpperCAmelCase ):
            a_ = dpr_record['question']
            a_ = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(_UpperCAmelCase ) + '\n' )


if __name__ == "__main__":
    main()
697
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]: a_ = size if size is not None else {'height': 18, 'width': 18} a_ = parent a_ = batch_size a_ = num_channels a_ = image_size a_ = min_resolution a_ = max_resolution a_ = do_resize a_ = size a_ = apply_ocr def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _snake_case ( snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = LayoutLMvaImageProcessingTester(self ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , UpperCAmelCase__ ) self.assertIsInstance(encoding.boxes , UpperCAmelCase__ ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in 
image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> int: # with apply_OCR = True a_ = LayoutLMvaImageProcessor() from datasets import load_dataset a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) a_ = Image.open(ds[0]['file'] ).convert('RGB' ) a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 
'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 
658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCAmelCase__ ) self.assertListEqual(encoding.boxes , UpperCAmelCase__ ) # with apply_OCR = False a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
697
1
'''simple docstring''' import os from pathlib import Path def a ( ) -> List[str]: """simple docstring""" from torch.utils.cpp_extension import load a_ = Path(_UpperCAmelCase ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr' a_ = [ root / filename for filename in [ 'vision.cpp', os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ), os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ), ] ] load( 'MultiScaleDeformableAttention' , _UpperCAmelCase , with_cuda=_UpperCAmelCase , extra_include_paths=[str(_UpperCAmelCase )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[ '-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__', ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
697
'''simple docstring''' import math def a ( _UpperCAmelCase ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def a ( _UpperCAmelCase = 1_0_0_0_1 ) -> int: """simple docstring""" try: a_ = int(_UpperCAmelCase ) except (TypeError, ValueError): raise TypeError('Parameter nth must be int or castable to int.' ) from None if nth <= 0: raise ValueError('Parameter nth must be greater than or equal to one.' ) a_ = [] a_ = 2 while len(_UpperCAmelCase ) < nth: if is_prime(_UpperCAmelCase ): primes.append(_UpperCAmelCase ) num += 1 else: num += 1 return primes[len(_UpperCAmelCase ) - 1] if __name__ == "__main__": print(f'''{solution() = }''')
697
1
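# Sketch: a self-contained version of the 6k +/- 1 primality test used in the
# snippet above; the name `is_prime_sketch` is hypothetical, chosen only for
# illustration.
import math


def is_prime_sketch(number: int) -> bool:
    # 2 and 3 are prime; negatives, 0, 1, even numbers and multiples of 3 are not
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # every remaining prime is of the form 6k +/- 1, so only 5, 7, 11, 13, ...
    # up to sqrt(number) need to be tried as divisors
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


assert [n for n in range(2, 30) if is_prime_sketch(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]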
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class _snake_case ( snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = XGLMTokenizer _UpperCamelCase = XGLMTokenizerFast _UpperCamelCase = True _UpperCamelCase = True def __SCREAMING_SNAKE_CASE ( self ) -> Dict: super().setUp() # We have a SentencePiece fixture for testing a_ = XGLMTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = '<pad>' a_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(len(UpperCAmelCase__ ) , 1008 ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ = XGLMTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) a_ = tokenizer.tokenize('This is a test' ) self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) a_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCAmelCase__ , f.name ) a_ = XGLMTokenizer(f.name , keep_accents=UpperCAmelCase__ ) a_ = pickle.dumps(UpperCAmelCase__ ) pickle.loads(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: if not self.test_rust_tokenizer: return a_ = self.get_tokenizer() a_ = self.get_rust_tokenizer() a_ = 'I was born in 92000, and this is falsé.' 
a_ = tokenizer.tokenize(UpperCAmelCase__ ) a_ = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) a_ = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = self.get_rust_tokenizer() a_ = tokenizer.encode(UpperCAmelCase__ ) a_ = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = 'Hello World!' a_ = [2, 3_1227, 4447, 35] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off a_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: # fmt: off a_ = { 'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='facebook/xglm-564M' , padding=UpperCAmelCase__ , )
697
'''simple docstring''' import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _snake_case ( unittest.TestCase ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = 10 def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = [1, 2, 3, 4] a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' a_ , a_ = process_story(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , [] ) def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = '' a_ , a_ = process_story(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , [] ) self.assertEqual(UpperCAmelCase__ , [] ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) a_ , a_ = process_story(UpperCAmelCase__ ) a_ = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = ['It was the best of times.'] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = torch.tensor([1, 2, 3, 4] ) a_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() ) def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = 101 a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ ) np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
697
1
'''simple docstring''' import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: """simple docstring""" if gpta_config_file == "": a_ = GPTaConfig() else: a_ = GPTaConfig.from_json_file(_UpperCAmelCase ) a_ = GPTaModel(_UpperCAmelCase ) # Load weights from numpy load_tf_weights_in_gpta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Save pytorch-model a_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME a_ = pytorch_dump_folder_path + '/' + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict() , _UpperCAmelCase ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--gpt2_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture." ), ) __lowerCAmelCase =parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
697
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin __lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class _snake_case ( unittest.TestCase , snake_case ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = load_tool('text-question-answering' ) self.tool.setup() a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' ) self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' ) self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' ) self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' ) self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
697
1
'''simple docstring''' def a ( _UpperCAmelCase = 5_0 ) -> int: """simple docstring""" a_ = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'''{solution() = }''')
697
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =[ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
697
1
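# Sketch: a worked check of the block-counting recurrence above, reconstructed
# under the assumption that the trailing `+= 1` (a block flush against the right
# edge) belongs inside the block-length loop of the flattened source; Project
# Euler 114 quotes exactly 17 arrangements for a row of length 7.
def count_ways_sketch(length: int) -> int:
    ways = [1] * (length + 1)  # one way for every length: leave the row empty
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # a block at `block_start`, one mandatory empty separator cell,
                # then any valid filling of the remaining suffix
                ways[row_length] += ways[row_length - block_start - block_length - 1]
            ways[row_length] += 1  # the block flush against the right edge
    return ways[length]


assert count_ways_sketch(7) == 17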
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class _snake_case : """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=2 , UpperCAmelCase__=32 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=32 , UpperCAmelCase__=4 , UpperCAmelCase__=[0, 1, 2, 3] , UpperCAmelCase__=4 , UpperCAmelCase__=37 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=3 , UpperCAmelCase__=[1, 384, 24, 24] , UpperCAmelCase__=True , UpperCAmelCase__=None , ) -> Optional[Any]: a_ = parent a_ = batch_size a_ = image_size a_ = patch_size a_ = num_channels a_ = is_training a_ = use_labels a_ = hidden_size a_ = num_hidden_layers a_ = backbone_out_indices a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = initializer_range a_ = num_labels a_ = backbone_featmap_shape a_ = scope a_ = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) a_ = (image_size // patch_size) ** 2 a_ = num_patches + 1 def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a_ = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [96, 192, 384, 768], 'num_groups': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=UpperCAmelCase__ , backbone_featmap_shape=self.backbone_featmap_shape , ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = DPTModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , 
UpperCAmelCase__ , UpperCAmelCase__ ) -> int: a_ = self.num_labels a_ = DPTForDepthEstimation(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]: a_ = self.num_labels a_ = DPTForSemanticSegmentation(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = self.prepare_config_and_inputs() a_ , a_ , a_ = config_and_inputs a_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _snake_case ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () _UpperCamelCase = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ = DPTModelTester(self ) a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason='DPT does not use inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) a_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = True if model_class in get_values(UpperCAmelCase__ ): continue a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) a_ = 
model(**UpperCAmelCase__ ).loss loss.backward() def __SCREAMING_SNAKE_CASE ( self ) -> Any: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = False a_ = True if model_class in get_values(UpperCAmelCase__ ) or not model_class.supports_gradient_checkpointing: continue a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.gradient_checkpointing_enable() model.train() a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) a_ = model(**UpperCAmelCase__ ).loss loss.backward() def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = _config_zero_init(UpperCAmelCase__ ) for model_class in self.all_model_classes: a_ = model_class(config=UpperCAmelCase__ ) # Skip the check for the backbone a_ = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": a_ = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: pass @slow def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: a_ = DPTModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = 'add' with self.assertRaises(UpperCAmelCase__ ): a_ = DPTForDepthEstimation(UpperCAmelCase__ ) def a ( ) -> Union[str, Any]: """simple docstring""" a_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision @slow class _snake_case ( unittest.TestCase ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' ) a_ = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(UpperCAmelCase__ ) a_ = prepare_img() a_ = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): a_ = model(**UpperCAmelCase__ ) a_ = outputs.predicted_depth # verify the predicted depth a_ = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , UpperCAmelCase__ ) a_ = torch.tensor( [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , UpperCAmelCase__ , atol=1e-4 ) )
697
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={ "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json", # See all ViT models at https://huggingface.co/models?filter=vit } class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = "vit" def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict: super().__init__(**UpperCAmelCase__ ) a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = initializer_range a_ = layer_norm_eps a_ = image_size a_ = patch_size a_ = num_channels a_ = qkv_bias a_ = encoder_stride class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = version.parse("1.11" ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __SCREAMING_SNAKE_CASE ( self ) -> float: return 1e-4
697
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor __lowerCAmelCase =logging.get_logger(__name__) class _snake_case ( snake_case ): """simple docstring""" def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> None: warnings.warn( 'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use DeformableDetrImageProcessor instead.' , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
697
'''simple docstring''' def a ( _UpperCAmelCase = 5_0 ) -> int: """simple docstring""" a_ = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'''{solution() = }''')
697
1
'''simple docstring''' from __future__ import annotations def a ( _UpperCAmelCase ) -> bool: """simple docstring""" a_ = len(_UpperCAmelCase ) # We need to create solution object to save path. a_ = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] a_ = run_maze(_UpperCAmelCase , 0 , 0 , _UpperCAmelCase ) if solved: print('\n'.join(str(_UpperCAmelCase ) for row in solutions ) ) else: print('No solution exists!' ) return solved def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool: """simple docstring""" a_ = len(_UpperCAmelCase ) # Final check point. if i == j == (size - 1): a_ = 1 return True a_ = (not i < 0) and (not j < 0) # Check lower bounds a_ = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. a_ = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited a_ = 1 # check for directions if ( run_maze(_UpperCAmelCase , i + 1 , _UpperCAmelCase , _UpperCAmelCase ) or run_maze(_UpperCAmelCase , _UpperCAmelCase , j + 1 , _UpperCAmelCase ) or run_maze(_UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase ) or run_maze(_UpperCAmelCase , _UpperCAmelCase , j - 1 , _UpperCAmelCase ) ): return True a_ = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
697
'''simple docstring''' def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int: """simple docstring""" return int(input_a == input_a == 0 ) def a ( ) -> None: """simple docstring""" print('Truth Table of NOR Gate:' ) print('| Input 1 | Input 2 | Output |' ) print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' ) print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' ) print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' ) print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
697
1
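# Sketch: a hand-traceable, self-contained rewrite of the backtracking maze
# solver above (0 = open cell, 1 = blocked cell; the path is marked with 1s);
# `solve_maze_sketch` is a hypothetical name used only for this example.
def solve_maze_sketch(maze):
    size = len(maze)
    path = [[0] * size for _ in range(size)]

    def walk(i: int, j: int) -> bool:
        if i == j == size - 1:  # reached the bottom-right corner
            path[i][j] = 1
            return True
        if 0 <= i < size and 0 <= j < size and not path[i][j] and not maze[i][j]:
            path[i][j] = 1  # tentatively take this cell
            # explore down, right, up, left -- the same order as the snippet above
            if walk(i + 1, j) or walk(i, j + 1) or walk(i - 1, j) or walk(i, j - 1):
                return True
            path[i][j] = 0  # dead end: backtrack
        return False

    return path if walk(0, 0) else None


assert solve_maze_sketch([[0, 1, 0], [0, 0, 0], [1, 0, 0]]) == [
    [1, 0, 0],
    [1, 1, 0],
    [0, 1, 1],
]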
'''simple docstring''' import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: """simple docstring""" if openai_config_file == "": a_ = OpenAIGPTConfig() else: a_ = OpenAIGPTConfig.from_json_file(_UpperCAmelCase ) a_ = OpenAIGPTModel(_UpperCAmelCase ) # Load weights from numpy load_tf_weights_in_openai_gpt(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Save pytorch-model a_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME a_ = pytorch_dump_folder_path + '/' + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict() , _UpperCAmelCase ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--openai_checkpoint_folder_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--openai_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture." ), ) __lowerCAmelCase =parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
697
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def a ( _UpperCAmelCase ) -> int: """simple docstring""" return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class _snake_case ( snake_case ): """simple docstring""" @staticmethod def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str: a_ = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' ) download_parser.add_argument( '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , ) download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' ) download_parser.set_defaults(func=UpperCAmelCase__ ) def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = model a_ = cache a_ = force a_ = trust_remote_code def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
697
1
'''simple docstring''' import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={ "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json", "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json", } class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = "encodec" def __init__( self , UpperCAmelCase__=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , UpperCAmelCase__=2_4000 , UpperCAmelCase__=1 , UpperCAmelCase__=False , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=128 , UpperCAmelCase__=32 , UpperCAmelCase__=1 , UpperCAmelCase__=[8, 5, 4, 2] , UpperCAmelCase__="weight_norm" , UpperCAmelCase__=7 , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=2 , UpperCAmelCase__=True , UpperCAmelCase__="reflect" , UpperCAmelCase__=2 , UpperCAmelCase__=2 , UpperCAmelCase__=1.0 , UpperCAmelCase__=1024 , UpperCAmelCase__=None , UpperCAmelCase__=True , **UpperCAmelCase__ , ) -> Tuple: a_ = target_bandwidths a_ = sampling_rate a_ = audio_channels a_ = normalize a_ = chunk_length_s a_ = overlap a_ = hidden_size a_ = num_filters a_ = num_residual_layers a_ = upsampling_ratios a_ = norm_type a_ = kernel_size a_ = last_kernel_size a_ = residual_kernel_size a_ = dilation_growth_rate a_ = use_causal_conv a_ = pad_mode a_ = compress a_ = num_lstm_layers a_ = trim_right_ratio a_ = codebook_size a_ = codebook_dim if codebook_dim is not None else hidden_size a_ = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''' ) super().__init__(**UpperCAmelCase__ ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
697
'''simple docstring''' def a ( _UpperCAmelCase ) -> int: """simple docstring""" assert column_title.isupper() a_ = 0 a_ = len(_UpperCAmelCase ) - 1 a_ = 0 while index >= 0: a_ = (ord(column_title[index] ) - 6_4) * pow(2_6 , _UpperCAmelCase ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
697
1
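# Sketch: the column-title conversion above is plain base-26 arithmetic with
# 'A' = 1 .. 'Z' = 26; e.g. "AB" -> 1 * 26 + 2 = 28. This equivalent rewrite
# evaluates left to right (Horner's rule) instead of right to left with powers;
# the function name is hypothetical.
def column_title_to_number_sketch(title: str) -> int:
    assert title.isupper()
    number = 0
    for char in title:
        number = number * 26 + (ord(char) - 64)  # ord('A') - 64 == 1
    return number


assert column_title_to_number_sketch("A") == 1
assert column_title_to_number_sketch("AB") == 28
assert column_title_to_number_sketch("ZZ") == 702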
'''simple docstring''' __lowerCAmelCase ={ "meter": "m", "kilometer": "km", "megametre": "Mm", "gigametre": "Gm", "terametre": "Tm", "petametre": "Pm", "exametre": "Em", "zettametre": "Zm", "yottametre": "Ym", } # Exponent of the factor(meter) __lowerCAmelCase ={ "m": 0, "km": 3, "Mm": 6, "Gm": 9, "Tm": 12, "Pm": 15, "Em": 18, "Zm": 21, "Ym": 24, } def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float: """simple docstring""" a_ = from_type.lower().strip('s' ) a_ = to_type.lower().strip('s' ) a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase ) a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase ) if from_sanitized not in METRIC_CONVERSION: a_ = ( F'''Invalid \'from_type\' value: {from_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}''' ) raise ValueError(_UpperCAmelCase ) if to_sanitized not in METRIC_CONVERSION: a_ = ( F'''Invalid \'to_type\' value: {to_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}''' ) raise ValueError(_UpperCAmelCase ) a_ = METRIC_CONVERSION[from_sanitized] a_ = METRIC_CONVERSION[to_sanitized] a_ = 1 if from_exponent > to_exponent: a_ = from_exponent - to_exponent else: a_ = -(to_exponent - from_exponent) return value * pow(1_0 , _UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
697
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCAmelCase =logging.get_logger(__name__) class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]: super().__init__( feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , ) a_ = spectrogram_length a_ = num_channels a_ = patch_size a_ = feature_size // self.patch_size[1] a_ = n_fft a_ = sampling_rate // hop_length_to_sampling_rate a_ = sampling_rate a_ = padding_value a_ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray: a_ = spectrogram( UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , ) a_ = log_spec[:, :-1] a_ = log_spec - 2_0.0 a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) a_ = is_batched_numpy or ( isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ): a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa ) elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a_ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis a_ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , UpperCAmelCase__ ): a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask a_ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: a_ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] a_ = np.array(UpperCAmelCase__ ).astype(np.floataa ) # convert into correct format for padding a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) a_ = padded_audio_features * self.padding_value for i in range(len(UpperCAmelCase__ ) ): a_ = audio_features[i] a_ = feature # return as BatchFeature if return_attention_mask: a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: a_ = {'audio_values': padded_audio_features} a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) return encoded_inputs
697
1
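# Sketch: the metric conversion above reduces to scaling by a power of ten
# given by the exponent difference between the two units; worked examples:
import math

assert 5 * 10 ** (3 - 0) == 5000  # kilometer (exp 3) -> meter (exp 0): 5 km = 5000 m
assert math.isclose(2 * 10 ** (0 - 3), 0.002)  # meter -> kilometer: 2 m = 0.002 km
assert 7 * 10 ** (6 - 3) == 7000  # megametre (exp 6) -> kilometer (exp 3): 7 Mm = 7000 km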
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def a ( ) -> List[str]: """simple docstring""" a_ = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' a_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' ) return image def a ( _UpperCAmelCase ) -> List[Any]: """simple docstring""" a_ = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') ) # fmt: on return rename_keys def a ( _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase ) -> int: """simple docstring""" a_ = dct.pop(_UpperCAmelCase ) a_ = val def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str: """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) a_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a_ = torch.cat((q_bias, torch.zeros_like(_UpperCAmelCase , requires_grad=_UpperCAmelCase ), v_bias) ) a_ = qkv_bias def a ( _UpperCAmelCase ) -> Tuple: """simple docstring""" a_ = 3_6_4 if 'coco' in model_name else 2_2_4 a_ = InstructBlipVisionConfig(image_size=_UpperCAmelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a_ = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a_ = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a_ = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_2_0_0_1 ).to_dict() elif "vicuna-13b" in model_name: a_ = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_2_0_0_1 ).to_dict() else: raise ValueError('Model name not supported' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a_ = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict() a_ = InstructBlipConfig(vision_config=_UpperCAmelCase , text_config=_UpperCAmelCase , qformer_config=_UpperCAmelCase ) return config, image_size @torch.no_grad() def a ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False ) -> Optional[int]: """simple docstring""" a_ = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' ) qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} ) if "t5" in model_name: a_ = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a_ = LlamaTokenizerFast.from_pretrained( 'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' ) tokenizer.add_special_tokens({'pad_token': '[PAD]'} ) a_ , a_ = get_blipa_config(_UpperCAmelCase ) a_ = InstructBlipForConditionalGeneration(_UpperCAmelCase ).eval() a_ = { 'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'), } a_ , a_ = model_name_to_original[model_name] # load original model print('Loading original model...' ) a_ = 'cuda:1' if torch.cuda.is_available() else 'cpu' a_ = 'cuda:2' if torch.cuda.is_available() else 'cpu' a_ , a_ , a_ = load_model_and_preprocess( name=_UpperCAmelCase , model_type=_UpperCAmelCase , is_eval=_UpperCAmelCase , device=_UpperCAmelCase ) original_model.eval() print('Done!' 
) # update state dict keys a_ = original_model.state_dict() a_ = create_rename_keys(_UpperCAmelCase ) for src, dest in rename_keys: rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a_ = state_dict.pop(_UpperCAmelCase ) if key.startswith('Qformer.bert' ): a_ = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: a_ = key.replace('self' , 'attention' ) if "llm_proj" in key: a_ = key.replace('llm_proj' , 'language_projection' ) if "t5_proj" in key: a_ = key.replace('t5_proj' , 'language_projection' ) if key.startswith('llm_model' ): a_ = key.replace('llm_model' , 'language_model' ) if key.startswith('t5' ): a_ = key.replace('t5' , 'language' ) a_ = val # read in qv biases read_in_q_v_bias(_UpperCAmelCase , _UpperCAmelCase ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) a_ = load_demo_image() a_ = 'What is unusual about this image?' # create processor a_ = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase ) a_ = InstructBlipProcessor( image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase , ) a_ = processor(images=_UpperCAmelCase , text=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # make sure processor creates exact same pixel values a_ = vis_processors['eval'](_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase ) a_ = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , _UpperCAmelCase ) original_model.to(_UpperCAmelCase ) hf_model.to(_UpperCAmelCase ) with torch.no_grad(): if "vicuna" in model_name: a_ = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits a_ = hf_model(**_UpperCAmelCase ).logits else: a_ = original_model( {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits a_ = tokenizer('\n' , return_tensors='pt' ).input_ids.to(_UpperCAmelCase ) a_ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 ) a_ = hf_model(**_UpperCAmelCase , labels=_UpperCAmelCase ).logits print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a_ = 1E-4 if 'vicuna' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , _UpperCAmelCase , atol=_UpperCAmelCase ) print('Looks ok!' ) print('Generating with original model...' ) a_ = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('Generating with HF model...' ) a_ = hf_model.generate( **_UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a_ = 2 print('Original generation:' , _UpperCAmelCase ) a_ = processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) a_ = [text.strip() for text in output_text] print('HF generation:' , _UpperCAmelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_UpperCAmelCase ) hf_model.save_pretrained(_UpperCAmelCase ) if push_to_hub: processor.push_to_hub(F'''Salesforce/{model_name}''' ) hf_model.push_to_hub(F'''Salesforce/{model_name}''' ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() __lowerCAmelCase =[ "instructblip-vicuna-7b", "instructblip-vicuna-13b", "instructblip-flan-t5-xl", "instructblip-flan-t5-xxl", ] parser.add_argument( "--model_name", default="instructblip-flan-t5-xl", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) __lowerCAmelCase =parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
697
'''simple docstring''' def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]: """simple docstring""" if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ): a_ = len(set_a.intersection(_UpperCAmelCase ) ) if alternative_union: a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase ) else: a_ = len(set_a.union(_UpperCAmelCase ) ) return intersection / union if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(_UpperCAmelCase , (list, tuple) ): a_ = [element for element in set_a if element in set_b] if alternative_union: a_ = len(_UpperCAmelCase ) + len(_UpperCAmelCase ) return len(_UpperCAmelCase ) / union else: a_ = set_a + [element for element in set_b if element not in set_a] return len(_UpperCAmelCase ) / len(_UpperCAmelCase ) return len(_UpperCAmelCase ) / len(_UpperCAmelCase ) return None if __name__ == "__main__": __lowerCAmelCase ={"a", "b", "c", "d", "e"} __lowerCAmelCase ={"c", "d", "e", "f", "h", "i"} print(jaccard_similarity(set_a, set_b))
697
1
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __lowerCAmelCase =Lock() def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(_UpperCAmelCase ) process_lock.release() # receive your right neighbor's value process_lock.acquire() a_ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left a_ = min(_UpperCAmelCase , _UpperCAmelCase ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(_UpperCAmelCase ) process_lock.release() # receive your left neighbor's value process_lock.acquire() a_ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right a_ = max(_UpperCAmelCase , _UpperCAmelCase ) # after all swaps are performed, send the values back to main result_pipe[1].send(_UpperCAmelCase ) def a ( _UpperCAmelCase ) -> List[str]: """simple docstring""" a_ = [] a_ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop a_ = Pipe() a_ = Pipe() process_array_.append( Process( target=_UpperCAmelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) a_ = temp_rs a_ = temp_rr for i in range(1 , len(_UpperCAmelCase ) - 1 ): a_ = Pipe() a_ = Pipe() process_array_.append( Process( target=_UpperCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) a_ = temp_rs a_ = temp_rr process_array_.append( Process( target=_UpperCAmelCase , args=( len(_UpperCAmelCase ) - 1, arr[len(_UpperCAmelCase ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_UpperCAmelCase ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(_UpperCAmelCase ) ): a_ = result_pipe[p][0].recv() process_array_[p].join() return arr def a ( ) -> Optional[Any]: """simple docstring""" a_ = list(range(1_0 , 0 , -1 ) ) print('Initial List' ) print(*_UpperCAmelCase ) a_ = odd_even_transposition(_UpperCAmelCase ) print('Sorted List\n' ) print(*_UpperCAmelCase ) if __name__ == "__main__": main()
697
'''simple docstring'''

def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
    """simple docstring"""
    if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    a_ = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(_UpperCAmelCase )
        number += 1
        out += " "
    return out

if __name__ == "__main__":
    import doctest

    doctest.testmod()
697
1
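# --- illustrative sketch (not a dataset row) ----------------------------------
# The pipe-and-process cell above implements *parallel* odd-even transposition
# sort: each element lives in its own process and swaps with its neighbours over
# Pipes. The sequential form of the same algorithm is much shorter, shown below.
# Note that the general algorithm needs len(arr) phases; the row above hardcodes
# 10 phases because its demo list has exactly 10 elements.

def odd_even_transposition(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        # even phases compare (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition(list(range(10, 0, -1))) == list(range(1, 11))
# --------------------------------------------------------------------------------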
'''simple docstring''' import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _snake_case ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = IFImgaImgSuperResolutionPipeline _UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"} _UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} ) _UpperCamelCase = PipelineTesterMixin.required_optional_params - {"latents"} def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return self._get_superresolution_dummy_components() def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=0 ) -> List[str]: if str(UpperCAmelCase__ ).startswith('mps' ): a_ = torch.manual_seed(UpperCAmelCase__ ) else: a_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) a_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) a_ = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) a_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __SCREAMING_SNAKE_CASE ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: self._test_save_load_local() def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
697
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class _snake_case ( snake_case ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> str: super().__init__( features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , ) a_ = Generator( cache_dir=UpperCAmelCase__ , features=UpperCAmelCase__ , generator=UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: # Build iterable dataset if self.streaming: a_ = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: a_ = None a_ = None a_ = None a_ = None self.builder.download_and_prepare( download_config=UpperCAmelCase__ , download_mode=UpperCAmelCase__ , verification_mode=UpperCAmelCase__ , base_path=UpperCAmelCase__ , num_proc=self.num_proc , ) a_ = self.builder.as_dataset( split='train' , verification_mode=UpperCAmelCase__ , in_memory=self.keep_in_memory ) return dataset
697
1
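# --- illustrative sketch (not a dataset row) ----------------------------------
# The reader cell above wraps the `datasets` Generator builder: in streaming
# mode it yields an IterableDataset, otherwise it materialises a map-style
# Dataset via download_and_prepare()/as_dataset(). The public shortcut for the
# same pattern is Dataset.from_generator; this sketch assumes the `datasets`
# library is installed.

from datasets import Dataset

def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)   # materialised, map-style dataset
print(len(ds), ds[0])              # 3 {'id': 0, 'text': 'example 0'}
# --------------------------------------------------------------------------------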
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets __lowerCAmelCase ="\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n" __lowerCAmelCase ="\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n" __lowerCAmelCase ="\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}" def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> Union[str, Any]: """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): a_ = new_id # turn into Numpy arrays a_ = np.array(_UpperCAmelCase ) a_ = np.array(_UpperCAmelCase ) if reduce_labels: a_ = 2_5_5 a_ = label - 1 a_ = 2_5_5 a_ = label != ignore_index a_ = np.not_equal(_UpperCAmelCase , _UpperCAmelCase ) a_ = pred_label[mask] a_ = np.array(_UpperCAmelCase )[mask] a_ = pred_label[pred_label == label] a_ = np.histogram(_UpperCAmelCase , bins=_UpperCAmelCase , range=(0, num_labels - 1) )[0] a_ = np.histogram(_UpperCAmelCase , bins=_UpperCAmelCase , range=(0, num_labels - 1) )[0] a_ = np.histogram(_UpperCAmelCase , bins=_UpperCAmelCase , range=(0, num_labels - 1) )[0] a_ = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> Dict: """simple docstring""" a_ = np.zeros((num_labels,) , dtype=np.floataa ) a_ = np.zeros((num_labels,) , dtype=np.floataa ) a_ = np.zeros((num_labels,) , dtype=np.floataa ) a_ = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(_UpperCAmelCase , _UpperCAmelCase ): a_ , a_ , a_ , a_ = intersect_and_union( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> Optional[Any]: """simple docstring""" a_ , a_ , a_ , a_ = total_intersect_and_union( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # compute metrics a_ = {} a_ = total_area_intersect.sum() / total_area_label.sum() a_ = total_area_intersect / total_area_union a_ = total_area_intersect / total_area_label a_ = np.nanmean(_UpperCAmelCase ) a_ = np.nanmean(_UpperCAmelCase ) a_ = all_acc a_ = iou a_ = acc if nan_to_num is not None: a_ = {metric: np.nan_to_num(_UpperCAmelCase , nan=_UpperCAmelCase ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { 'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), 'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), } ) , reference_urls=[ 
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py' ] , ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , ) -> str: a_ = mean_iou( results=UpperCAmelCase__ , gt_seg_maps=UpperCAmelCase__ , num_labels=UpperCAmelCase__ , ignore_index=UpperCAmelCase__ , nan_to_num=UpperCAmelCase__ , label_map=UpperCAmelCase__ , reduce_labels=UpperCAmelCase__ , ) return iou_result
697
'''simple docstring''' from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake __lowerCAmelCase =numpy.array([0, 0]) __lowerCAmelCase =numpy.array([0.5, 0.866_0254]) __lowerCAmelCase =numpy.array([1, 0]) __lowerCAmelCase =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def a ( _UpperCAmelCase , _UpperCAmelCase ) -> list[numpy.ndarray]: """simple docstring""" a_ = initial_vectors for _ in range(_UpperCAmelCase ): a_ = iteration_step(_UpperCAmelCase ) return vectors def a ( _UpperCAmelCase ) -> list[numpy.ndarray]: """simple docstring""" a_ = [] for i, start_vector in enumerate(vectors[:-1] ): a_ = vectors[i + 1] new_vectors.append(_UpperCAmelCase ) a_ = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def a ( _UpperCAmelCase , _UpperCAmelCase ) -> numpy.ndarray: """simple docstring""" a_ = numpy.radians(_UpperCAmelCase ) a_ , a_ = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase ) a_ = numpy.array(((c, -s), (s, c)) ) return numpy.dot(_UpperCAmelCase , _UpperCAmelCase ) def a ( _UpperCAmelCase ) -> None: """simple docstring""" a_ = plt.gca() axes.set_aspect('equal' ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() a_ , a_ = zip(*_UpperCAmelCase ) plt.plot(_UpperCAmelCase , _UpperCAmelCase ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase =iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
697
1
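# --- illustrative sketch (not a dataset row) ----------------------------------
# The metric cell above accumulates per-class intersection/union areas with
# np.histogram. A tiny worked example of the same computation on one 2x2 map
# with 3 classes; the variable names are hypothetical de-manglings of the
# row's locals.

import numpy as np

num_labels = 3
pred  = np.array([[0, 1], [2, 2]])
label = np.array([[0, 1], [2, 1]])

mask = label != 255                       # ignore_index handling (no-op here)
pred, label = pred[mask], label[mask]
intersect = pred[pred == label]           # correctly predicted pixels, per class

area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
area_pred      = np.histogram(pred,      bins=num_labels, range=(0, num_labels - 1))[0]
area_label     = np.histogram(label,     bins=num_labels, range=(0, num_labels - 1))[0]
area_union     = area_pred + area_label - area_intersect

print(area_intersect / area_union)        # per-class IoU: [1.  0.5 0.5]
# --------------------------------------------------------------------------------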
'''simple docstring'''

import random
from .binary_exp_mod import bin_exp_mod

def a ( _UpperCAmelCase , _UpperCAmelCase=1_0_0_0 ) -> Dict:
    """simple docstring"""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    a_ = n - 1
    a_ = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2 ** exp)
    a_ = 0
    while count < prec:
        a_ = random.randint(2 , n - 1 )
        a_ = bin_exp_mod(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        if b != 1:
            a_ = True
            for _ in range(_UpperCAmelCase ):
                if b == n - 1:
                    a_ = False
                    break
                a_ = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True

if __name__ == "__main__":
    __lowerCAmelCase =abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
697
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def a ( _UpperCAmelCase ) -> int: """simple docstring""" if ( (cp >= 0X4_e00 and cp <= 0X9_fff) or (cp >= 0X3_400 and cp <= 0X4_dbf) # or (cp >= 0X20_000 and cp <= 0X2a_6df) # or (cp >= 0X2a_700 and cp <= 0X2b_73f) # or (cp >= 0X2b_740 and cp <= 0X2b_81f) # or (cp >= 0X2b_820 and cp <= 0X2c_eaf) # or (cp >= 0Xf_900 and cp <= 0Xf_aff) or (cp >= 0X2f_800 and cp <= 0X2f_a1f) # ): # return True return False def a ( _UpperCAmelCase ) -> Union[str, Any]: """simple docstring""" for char in word: a_ = ord(_UpperCAmelCase ) if not _is_chinese_char(_UpperCAmelCase ): return 0 return 1 def a ( _UpperCAmelCase ) -> Tuple: """simple docstring""" a_ = set() for token in tokens: a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase ) if chinese_word: word_set.add(_UpperCAmelCase ) a_ = list(_UpperCAmelCase ) return word_list def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: """simple docstring""" if not chinese_word_set: return bert_tokens a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] ) a_ = bert_tokens a_ , a_ = 0, len(_UpperCAmelCase ) while start < end: a_ = True if is_chinese(bert_word[start] ): a_ = min(end - start , _UpperCAmelCase ) for i in range(_UpperCAmelCase , 1 , -1 ): a_ = ''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): a_ = '##' + bert_word[j] a_ = start + i a_ = False break if single_word: start += 1 return bert_word def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: """simple docstring""" a_ = [] for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ): a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws a_ = [get_chinese_word(_UpperCAmelCase ) for r in res] ltp_res.extend(_UpperCAmelCase ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) a_ = [] for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ): a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 ) bert_res.extend(res['input_ids'] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) a_ = [] for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ): a_ = [] for id in input_ids: a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase ) input_tokens.append(_UpperCAmelCase ) a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase ) a_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_UpperCAmelCase ): if token[:2] == "##": a_ = token[2:] # save chinese tokens' pos if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ): ref_id.append(_UpperCAmelCase ) ref_ids.append(_UpperCAmelCase ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) return ref_ids def a ( _UpperCAmelCase ) -> Optional[Any]: """simple docstring""" with open(args.file_name , 'r' , encoding='utf-8' ) as f: a_ = f.readlines() a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' a_ = LTP(args.ltp ) # faster in GPU device a_ = BertTokenizer.from_pretrained(args.bert ) a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids] f.writelines(_UpperCAmelCase ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) __lowerCAmelCase =parser.parse_args() main(args)
697
1
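# --- illustrative sketch (not a dataset row) ----------------------------------
# The primality cell above is a (mangled) probabilistic Miller-Rabin test.
# A runnable reconstruction using the built-in three-argument pow() instead of
# the row's bin_exp_mod helper; the names n, prec, a, b, d, exp are
# hypothetical de-manglings.

import random

def is_prime_big(n, prec=1000):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:          # factor n - 1 as d * 2**exp with d odd
        d //= 2
        exp += 1
    for _ in range(prec):      # probabilistic: prec independent random bases
        a = random.randint(2, n - 1)
        b = pow(a, d, n)
        if b in (1, n - 1):
            continue
        for _ in range(exp - 1):
            b = b * b % n
            if b == n - 1:
                break
        else:
            return False       # a is a witness: n is composite
    return True

assert is_prime_big(97) and not is_prime_big(91)   # 91 = 7 * 13
# --------------------------------------------------------------------------------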
'''simple docstring''' from ..utils import DummyObject, requires_backends class _snake_case ( metaclass=snake_case ): """simple docstring""" _UpperCamelCase = ["transformers", "torch", "note_seq"] def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[Any]: requires_backends(self , ['transformers', 'torch', 'note_seq'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> int: requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) @classmethod def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> str: requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
697
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __lowerCAmelCase ={ "vocab_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json" ), }, } __lowerCAmelCase ={ "yjernite/retribert-base-uncased": 512, } __lowerCAmelCase ={ "yjernite/retribert-base-uncased": {"do_lower_case": True}, } class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = RetriBertTokenizer _UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__="[UNK]" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="[PAD]" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[MASK]" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> int: super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , ) a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars ): a_ = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) ) a_ = do_lower_case a_ = strip_accents a_ = tokenize_chinese_chars a_ = normalizer_class(**UpperCAmelCase__ ) a_ = do_lower_case def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> str: a_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]: a_ = [self.sep_token_id] a_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]: a_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ )
697
1
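# --- illustrative sketch (not a dataset row) ----------------------------------
# The cell above shows transformers' dummy-object pattern: placeholder classes
# that raise a helpful error when an optional backend is missing, instead of
# failing at import time. A minimal self-contained version of the same idea;
# the helper and class names here are illustrative, not the library's API.

import importlib.util

def requires_backends(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backends: {missing}")

class DummyObject(type):
    """Metaclass: public attribute access on the class triggers the backend check."""
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
        return super().__getattribute__(key)

class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)

# MidiProcessor() raises ImportError unless the `note_seq` package is installed.
# --------------------------------------------------------------------------------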
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={ "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } __lowerCAmelCase =[ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: """simple docstring""" for attribute in key.split('.' ): a_ = getattr(_UpperCAmelCase , _UpperCAmelCase ) if weight_type is not None: a_ = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape else: a_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": a_ = value elif weight_type == "weight_g": a_ = value elif weight_type == "weight_v": a_ = value elif weight_type == "bias": a_ = value elif weight_type == "running_mean": a_ = value elif weight_type == "running_var": a_ = value elif weight_type == "num_batches_tracked": a_ = value elif weight_type == "inv_freq": a_ = value else: a_ = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: """simple docstring""" a_ = [] a_ = fairseq_model.state_dict() a_ = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): a_ = False if "conv_layers" in name: load_conv_layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) a_ = True else: for key, mapped_key in MAPPING.items(): a_ = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: a_ = True if "*" in mapped_key: a_ = name.split(_UpperCAmelCase )[0].split('.' )[-2] a_ = mapped_key.replace('*' , _UpperCAmelCase ) if "pos_bias_u" in name: a_ = None elif "pos_bias_v" in name: a_ = None elif "weight_g" in name: a_ = 'weight_g' elif "weight_v" in name: a_ = 'weight_v' elif "bias" in name: a_ = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj a_ = 'weight' elif "running_mean" in name: a_ = 'running_mean' elif "inv_freq" in name: a_ = 'inv_freq' elif "running_var" in name: a_ = 'running_var' elif "num_batches_tracked" in name: a_ = 'num_batches_tracked' else: a_ = None set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) continue if not is_used: unused_weights.append(_UpperCAmelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: """simple docstring""" a_ = full_name.split('conv_layers.' )[-1] a_ = name.split('.' ) a_ = int(items[0] ) a_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) a_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_UpperCAmelCase ) @torch.no_grad() def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True 
) -> Tuple: """simple docstring""" if config_path is not None: a_ = WavaVecaConformerConfig.from_pretrained(_UpperCAmelCase , hidden_act='swish' ) else: a_ = WavaVecaConformerConfig() if "rope" in checkpoint_path: a_ = 'rotary' if is_finetuned: if dict_path: a_ = Dictionary.load(_UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq a_ = target_dict.pad_index a_ = target_dict.bos_index a_ = target_dict.eos_index a_ = len(target_dict.symbols ) a_ = os.path.join(_UpperCAmelCase , 'vocab.json' ) if not os.path.isdir(_UpperCAmelCase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCAmelCase ) ) return os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) a_ = target_dict.indices # fairseq has the <pad> and <s> switched a_ = 0 a_ = 1 with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(_UpperCAmelCase , _UpperCAmelCase ) a_ = WavaVecaCTCTokenizer( _UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCAmelCase , ) a_ = True if config.feat_extract_norm == 'layer' else False a_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ) a_ = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) a_ = WavaVecaConformerForCTC(_UpperCAmelCase ) else: a_ = WavaVecaConformerForPreTraining(_UpperCAmelCase ) if is_finetuned: a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: a_ = argparse.Namespace(task='audio_pretraining' ) a_ = fairseq.tasks.setup_task(_UpperCAmelCase ) a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_UpperCAmelCase ) a_ = model[0].eval() recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase , not is_finetuned ) hf_wavavec.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __lowerCAmelCase =parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
697
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase ={ "SCUT-DLVCLab/lilt-roberta-en-base": ( "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" ), } class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = "lilt" def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]: super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = hidden_act a_ = intermediate_size a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = type_vocab_size a_ = initializer_range a_ = layer_norm_eps a_ = position_embedding_type a_ = classifier_dropout a_ = channel_shrink_ratio a_ = max_ad_position_embeddings
697
1
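# --- illustrative sketch (not a dataset row) ----------------------------------
# The conversion cell above walks dotted checkpoint keys with getattr() and
# copies fairseq tensors onto the matching HF submodule after a shape check
# (its `set_recursively`). The core move, stripped of the model specifics and
# using numpy instead of torch; all names here are illustrative.

import numpy as np

def set_recursively(root, dotted_key, value, weight_type=None):
    pointer = root
    for attribute in dotted_key.split("."):
        pointer = getattr(pointer, attribute)        # descend the module tree
    if weight_type is not None:
        pointer = getattr(pointer, weight_type)
    if pointer.shape != value.shape:                 # shape sanity check
        raise ValueError(f"shape mismatch for {dotted_key}: {pointer.shape} vs {value.shape}")
    pointer[...] = value                             # in-place copy, like the row's `.data` assignment

class Leaf:
    def __init__(self):
        self.weight = np.zeros((2, 2))

class Model:
    def __init__(self):
        self.encoder = type("Enc", (), {})()
        self.encoder.proj = Leaf()

m = Model()
set_recursively(m, "encoder.proj", np.ones((2, 2)), weight_type="weight")
assert (m.encoder.proj.weight == 1).all()
# --------------------------------------------------------------------------------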
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case : """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=13 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=224 , UpperCAmelCase__=1000 , UpperCAmelCase__=[3, 3, 6, 4] , UpperCAmelCase__=[48, 56, 112, 220] , ) -> List[str]: a_ = parent a_ = batch_size a_ = num_channels a_ = is_training a_ = use_labels a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = num_labels a_ = image_size a_ = layer_depths a_ = embed_dims def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.num_labels ) a_ = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCAmelCase__ , layer_scale_init_value=1e-5 , ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]: a_ = SwiftFormerModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = self.num_labels a_ = SwiftFormerForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) a_ = SwiftFormerForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: ((a_) , (a_) , (a_)) = self.prepare_config_and_inputs() a_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _snake_case ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () _UpperCamelCase = ( {"feature-extraction": SwiftFormerModel, "image-classification": 
SwiftFormerForImageClassification} if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = SwiftFormerModelTester(self ) a_ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='SwiftFormer does not use inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: pass def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) a_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) a_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = SwiftFormerModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='SwiftFormer does not output attentions' ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: pass def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: def check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): a_ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) a_ = outputs.hidden_states a_ = 8 self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(UpperCAmelCase__ ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a_ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: def _config_zero_init(UpperCAmelCase__ ): a_ = copy.deepcopy(UpperCAmelCase__ ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or 
"layer_scale" in key: setattr(UpperCAmelCase__ , UpperCAmelCase__ , 1e-10 ) if isinstance(getattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ ): a_ = _config_zero_init(getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return configs_no_init a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = _config_zero_init(UpperCAmelCase__ ) for model_class in self.all_model_classes: a_ = model_class(config=UpperCAmelCase__ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: pass def a ( ) -> Optional[Any]: """simple docstring""" a_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> int: return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None @slow def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(UpperCAmelCase__ ) a_ = self.default_image_processor a_ = prepare_img() a_ = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): a_ = model(**UpperCAmelCase__ ) # verify the logits a_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) a_ = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
697
'''simple docstring''' from __future__ import annotations def a ( _UpperCAmelCase ) -> bool: """simple docstring""" a_ = len(_UpperCAmelCase ) # We need to create solution object to save path. a_ = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] a_ = run_maze(_UpperCAmelCase , 0 , 0 , _UpperCAmelCase ) if solved: print('\n'.join(str(_UpperCAmelCase ) for row in solutions ) ) else: print('No solution exists!' ) return solved def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool: """simple docstring""" a_ = len(_UpperCAmelCase ) # Final check point. if i == j == (size - 1): a_ = 1 return True a_ = (not i < 0) and (not j < 0) # Check lower bounds a_ = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. a_ = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited a_ = 1 # check for directions if ( run_maze(_UpperCAmelCase , i + 1 , _UpperCAmelCase , _UpperCAmelCase ) or run_maze(_UpperCAmelCase , _UpperCAmelCase , j + 1 , _UpperCAmelCase ) or run_maze(_UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase ) or run_maze(_UpperCAmelCase , _UpperCAmelCase , j - 1 , _UpperCAmelCase ) ): return True a_ = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
697
1
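# --- illustrative sketch (not a dataset row) ----------------------------------
# The maze cell above is a depth-first backtracker: it marks solutions[i][j],
# recurses down/right/up/left, and unmarks the cell on failure. A compact
# de-mangled version, runnable on a small grid (0 = open, 1 = blocked); the
# names are hypothetical reconstructions.

def run_maze(maze, i, j, solutions):
    size = len(maze)
    if i == j == size - 1:                     # reached the bottom-right exit
        solutions[i][j] = 1
        return True
    if 0 <= i < size and 0 <= j < size and not solutions[i][j] and not maze[i][j]:
        solutions[i][j] = 1                    # tentatively take this cell
        if (run_maze(maze, i + 1, j, solutions) or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions) or run_maze(maze, i, j - 1, solutions)):
            return True
        solutions[i][j] = 0                    # dead end: backtrack
    return False

maze = [[0, 1, 0],
        [0, 1, 0],
        [0, 0, 0]]
solutions = [[0] * 3 for _ in range(3)]
assert run_maze(maze, 0, 0, solutions)
print(*solutions, sep="\n")                    # 1s mark the found path
# --------------------------------------------------------------------------------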
'''simple docstring''' from timeit import timeit def a ( _UpperCAmelCase ) -> int: """simple docstring""" if number < 0: raise ValueError('the value of input must not be negative' ) a_ = 0 while number: number &= number - 1 result += 1 return result def a ( _UpperCAmelCase ) -> int: """simple docstring""" if number < 0: raise ValueError('the value of input must not be negative' ) a_ = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def a ( ) -> None: """simple docstring""" def do_benchmark(_UpperCAmelCase ) -> None: a_ = 'import __main__ as z' print(F'''Benchmark when {number = }:''' ) print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' ) a_ = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=_UpperCAmelCase ) print(F'''timeit() runs in {timing} seconds''' ) print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' ) a_ = timeit( 'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=_UpperCAmelCase , ) print(F'''timeit() runs in {timing} seconds''' ) for number in (2_5, 3_7, 5_8, 0): do_benchmark(_UpperCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
697
'''simple docstring''' __lowerCAmelCase ={ "meter": "m", "kilometer": "km", "megametre": "Mm", "gigametre": "Gm", "terametre": "Tm", "petametre": "Pm", "exametre": "Em", "zettametre": "Zm", "yottametre": "Ym", } # Exponent of the factor(meter) __lowerCAmelCase ={ "m": 0, "km": 3, "Mm": 6, "Gm": 9, "Tm": 12, "Pm": 15, "Em": 18, "Zm": 21, "Ym": 24, } def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float: """simple docstring""" a_ = from_type.lower().strip('s' ) a_ = to_type.lower().strip('s' ) a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase ) a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase ) if from_sanitized not in METRIC_CONVERSION: a_ = ( F'''Invalid \'from_type\' value: {from_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}''' ) raise ValueError(_UpperCAmelCase ) if to_sanitized not in METRIC_CONVERSION: a_ = ( F'''Invalid \'to_type\' value: {to_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}''' ) raise ValueError(_UpperCAmelCase ) a_ = METRIC_CONVERSION[from_sanitized] a_ = METRIC_CONVERSION[to_sanitized] a_ = 1 if from_exponent > to_exponent: a_ = from_exponent - to_exponent else: a_ = -(to_exponent - from_exponent) return value * pow(1_0 , _UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
697
1
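# --- illustrative sketch (not a dataset row) ----------------------------------
# Brian Kernighan's trick from the cell above: `n &= n - 1` clears the lowest
# set bit, so the loop runs once per set bit instead of once per bit position.
# Worked trace for 25 (0b11001, three set bits):

n, count = 25, 0
while n:
    print(f"{n:05b} & {n - 1:05b} -> {n & (n - 1):05b}")
    n &= n - 1
    count += 1
# 11001 & 11000 -> 11000
# 11000 & 10111 -> 10000
# 10000 & 01111 -> 00000
assert count == 3 == bin(25).count("1")
# --------------------------------------------------------------------------------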
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCAmelCase =16 __lowerCAmelCase =32 def a ( _UpperCAmelCase , _UpperCAmelCase = 1_6 ) -> Tuple: """simple docstring""" a_ = AutoTokenizer.from_pretrained('bert-base-cased' ) a_ = load_dataset('glue' , 'mrpc' ) def tokenize_function(_UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) a_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): a_ = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library a_ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. a_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": a_ = 1_6 elif accelerator.mixed_precision != "no": a_ = 8 else: a_ = None return tokenizer.pad( _UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , ) # Instantiate dataloaders. 
a_ = DataLoader( tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) a_ = DataLoader( tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __lowerCAmelCase =mocked_dataloaders # noqa: F811 def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str: """simple docstring""" if os.environ.get('TESTING_MOCKED_DATALOADERS' , _UpperCAmelCase ) == "1": a_ = 2 # Initialize accelerator a_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a_ = config['lr'] a_ = int(config['num_epochs'] ) a_ = int(config['seed'] ) a_ = int(config['batch_size'] ) a_ = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation a_ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: a_ = batch_size // MAX_GPU_BATCH_SIZE a_ = MAX_GPU_BATCH_SIZE set_seed(_UpperCAmelCase ) a_ , a_ = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a_ = model.to(accelerator.device ) # Instantiate optimizer a_ = AdamW(params=model.parameters() , lr=_UpperCAmelCase ) # Instantiate scheduler a_ = get_linear_schedule_with_warmup( optimizer=_UpperCAmelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a_ , a_ , a_ , a_ , a_ = accelerator.prepare( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Now we train the model for epoch in range(_UpperCAmelCase ): model.train() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) a_ = model(**_UpperCAmelCase ) a_ = outputs.loss a_ = loss / gradient_accumulation_steps accelerator.backward(_UpperCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() a_ = 0 for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): a_ = model(**_UpperCAmelCase ) a_ = outputs.logits.argmax(dim=-1 ) a_ , a_ = accelerator.gather((predictions, batch['labels']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(_UpperCAmelCase ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples a_ = predictions[: len(eval_dataloader.dataset ) - samples_seen] a_ = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=_UpperCAmelCase , references=_UpperCAmelCase , ) a_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , _UpperCAmelCase ) def a ( ) -> Union[str, Any]: """simple docstring""" a_ = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) a_ = parser.parse_args() a_ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6} training_function(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": main()
697
'''simple docstring''' import unittest from transformers import DonutProcessor __lowerCAmelCase ="naver-clova-ix/donut-base" class _snake_case ( unittest.TestCase ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = { 'name': 'John Doe', 'age': '99', 'city': 'Atlanta', 'state': 'GA', 'zip': '30301', 'phone': '123-4567', 'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}], } a_ = ( '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>' '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>' '<s_nicknames><s_nickname>Johnny</s_nickname>' '<sep/><s_nickname>JD</s_nickname></s_nicknames>' ) a_ = self.processor.tokenajson(UpperCAmelCase__ ) self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
697
1
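# --- illustrative sketch (not a dataset row) ----------------------------------
# The Accelerate script above truncates the *gathered* last batch so the padded
# duplicate samples added for even sharding don't pollute the metric. The
# bookkeeping, simulated without any distributed machinery (the sizes below are
# made up for illustration):

dataset_size = 10
gathered_batches = [4, 4, 4]            # 2 processes x batch 2; last batch padded
samples_seen = 0
kept = []
for step, batch in enumerate(gathered_batches):
    if step == len(gathered_batches) - 1:
        batch = dataset_size - samples_seen     # keep only the real samples
    samples_seen += batch
    kept.append(batch)
print(kept, sum(kept))   # [4, 4, 2] 10
# As the row's own comment notes, `accelerator.gather_for_metrics(...)` performs
# this truncation for you.
# --------------------------------------------------------------------------------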
'''simple docstring'''
from typing import Any

class _snake_case :
    """simple docstring"""

    def __init__( self , UpperCAmelCase__ ) -> List[str]:
        a_ = data
        a_ = None

    def __repr__( self ) -> str:
        return F'''Node({self.data})'''

class _snake_case :
    """simple docstring"""

    def __init__( self ) -> Optional[int]:
        a_ = None

    def __iter__( self ) -> Any:
        a_ = self.head
        while node:
            yield node.data
            a_ = node.next

    def __len__( self ) -> int:
        return sum(1 for _ in self )

    def __repr__( self ) -> str:
        return "->".join([str(UpperCAmelCase__ ) for item in self] )

    def __getitem__( self , UpperCAmelCase__ ) -> Any:
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None

    def __setitem__( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> None:
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        a_ = self.head
        for _ in range(UpperCAmelCase__ ):
            a_ = current.next
        a_ = data

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> None:
        self.insert_nth(len(self ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> None:
        self.insert_nth(0 , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> None:
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        a_ = Node(UpperCAmelCase__ )
        if self.head is None:
            a_ = new_node
        elif index == 0:
            a_ = self.head  # link new_node to head
            a_ = new_node
        else:
            a_ = self.head
            for _ in range(index - 1 ):
                a_ = temp.next
            a_ = temp.next
            a_ = new_node

    def __SCREAMING_SNAKE_CASE ( self ) -> None:  # print every node data
        print(self )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        return self.delete_nth(0 )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:  # delete from tail
        return self.delete_nth(len(self ) - 1 )

    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ = 0 ) -> Any:
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        a_ = self.head  # default first node
        if index == 0:
            a_ = self.head.next
        else:
            a_ = self.head
            for _ in range(index - 1 ):
                a_ = temp.next
            a_ = temp.next
            a_ = temp.next.next
        return delete_node.data

    def __SCREAMING_SNAKE_CASE ( self ) -> bool:
        return self.head is None

    def __SCREAMING_SNAKE_CASE ( self ) -> None:
        a_ = None
        a_ = self.head

        while current:
            # Store the current node's next node.
            a_ = current.next
            # Make the current node's next point backwards
            a_ = prev
            # Make the previous node be the current node
            a_ = current
            # Make the current node the next node (to progress iteration)
            a_ = next_node
        # Return prev in order to put the head at the end
        a_ = prev

def a ( ) -> None:
    """simple docstring"""
    a_ = LinkedList()
    assert linked_list.is_empty() is True
    assert str(_UpperCAmelCase ) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(1_0 ):
        assert len(_UpperCAmelCase ) == i
        linked_list.insert_nth(_UpperCAmelCase , i + 1 )
    assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 1_1 ) )

    linked_list.insert_head(0 )
    linked_list.insert_tail(1_1 )
    assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 1_2 ) )

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 1_0
    assert linked_list.delete_tail() == 1_1
    assert len(_UpperCAmelCase ) == 9
    assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 1_0 ) )

    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True

    for i in range(0 , 9 ):
        a_ = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True

    linked_list.reverse()
    assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(-8 , 1 ) )

def a ( ) -> None:
    """simple docstring"""
    a_ = [
        -9,
        1_0_0,
        Node(7_7_3_4_5_1_1_2 ),
        'dlrow olleH',
        7,
        5_5_5_5,
        0,
        -1_9_2.5_5_5_5_5,
        'Hello, world!',
        7_7.9,
        Node(1_0 ),
        None,
        None,
        1_2.2_0,
    ]
    a_ = LinkedList()

    for i in test_input:
        linked_list.insert_tail(_UpperCAmelCase )

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    a_ = linked_list.delete_head()
    assert result == -9
    assert (
        str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    a_ = linked_list.delete_tail()
    assert result == 1_2.2
    assert (
        str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    a_ = linked_list.delete_nth(1_0 )
    assert result is None
    assert (
        str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(_UpperCAmelCase )
    assert (
        str(_UpperCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(_UpperCAmelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )

def a ( ) -> List[Any]:
    """simple docstring"""
    from doctest import testmod

    testmod()
    a_ = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(_UpperCAmelCase )
    print('\nReading/changing Node data using indexing:' )
    print(F'''Element at Position 1: {linked_list[1]}''' )
    a_ = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(_UpperCAmelCase )
    print(F'''length of linked_list is : {len(_UpperCAmelCase )}''' )

if __name__ == "__main__":
    main()
697
'''simple docstring''' import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class _snake_case : """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]: a_ = parent a_ = out_indices if out_indices is not None else [4] a_ = stage_names a_ = out_features a_ = backbone a_ = batch_size a_ = image_size a_ = num_channels a_ = use_pretrained_backbone a_ = is_training def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = self.get_config() return config, pixel_values def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = TimmBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): a_ = model(UpperCAmelCase__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = self.prepare_config_and_inputs() a_ , a_ = config_and_inputs a_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = (TimmBackbone,) if is_torch_available() else () _UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = TimmBackboneModelTester(self ) a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = 'resnet18' a_ = 'microsoft/resnet-18' a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) 
self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def __SCREAMING_SNAKE_CASE ( self ) -> str: pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('Safetensors is not supported by timm.' ) def __SCREAMING_SNAKE_CASE ( self ) -> str: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) a_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = True a_ = self.has_attentions # no need to test all models as different heads yield the same functionality a_ = self.all_model_classes[0] a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = model(**UpperCAmelCase__ ) a_ = outputs[0][-1] # Encoder-/Decoder-only models a_ = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: a_ = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=UpperCAmelCase__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None a_ = copy.deepcopy(UpperCAmelCase__ ) a_ = None a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights a_ = copy.deepcopy(UpperCAmelCase__ ) a_ = False a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ )
697
1
'''simple docstring'''
from __future__ import annotations

def a ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> None:
    """simple docstring"""
    if start is None:
        a_ = 0

    if end is None:
        a_ = len(_UpperCAmelCase ) - 1

    if start >= end:
        return

    a_ = (start + end) // 2

    slowsort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
    slowsort(_UpperCAmelCase , mid + 1 , _UpperCAmelCase )

    if sequence[end] < sequence[mid]:
        a_ , a_ = sequence[mid], sequence[end]

    slowsort(_UpperCAmelCase , _UpperCAmelCase , end - 1 )

if __name__ == "__main__":
    from doctest import testmod

    testmod()
697
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]: a_ = size if size is not None else {'height': 18, 'width': 18} a_ = parent a_ = batch_size a_ = num_channels a_ = image_size a_ = min_resolution a_ = max_resolution a_ = do_resize a_ = size a_ = apply_ocr def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _snake_case ( snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = LayoutLMvaImageProcessingTester(self ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , UpperCAmelCase__ ) self.assertIsInstance(encoding.boxes , UpperCAmelCase__ ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in 
image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> int: # with apply_OCR = True a_ = LayoutLMvaImageProcessor() from datasets import load_dataset a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) a_ = Image.open(ds[0]['file'] ).convert('RGB' ) a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 
'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 
658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCAmelCase__ ) self.assertListEqual(encoding.boxes , UpperCAmelCase__ ) # with apply_OCR = False a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
697
1
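A minimal usage sketch for the slow sort snippet in the row above, assuming the de-obfuscated names `slowsort` and `sequence`; the row itself renames the function to `a` and its parameters to `_UpperCAmelCase`:

from __future__ import annotations

def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    # Mirrors the logic of the row above with readable names.
    start = 0 if start is None else start
    end = len(sequence) - 1 if end is None else end
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)      # recursively sort the left half
    slowsort(sequence, mid + 1, end)    # recursively sort the right half
    if sequence[end] < sequence[mid]:   # the maximum bubbles to the end
        sequence[mid], sequence[end] = sequence[end], sequence[mid]
    slowsort(sequence, start, end - 1)  # recurse on everything but the max

seq = [5, 2, 4, 1, 3]
slowsort(seq)
assert seq == [1, 2, 3, 4, 5]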
'''simple docstring'''
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

__lowerCAmelCase =3

def a ( _UpperCAmelCase ) -> int:
    """simple docstring"""
    print('Generating primitive root of p' )
    while True:
        a_ = random.randrange(3 , _UpperCAmelCase )
        if pow(_UpperCAmelCase , 2 , _UpperCAmelCase ) == 1:
            continue
        if pow(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) == 1:
            continue
        return g

def a ( _UpperCAmelCase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """simple docstring"""
    print('Generating prime p...' )
    a_ = rabin_miller.generate_large_prime(_UpperCAmelCase )  # select large prime number.
    a_ = primitive_root(_UpperCAmelCase )  # one primitive root on modulo p.
    a_ = random.randrange(3 , _UpperCAmelCase )  # private_key -> have to be greater than 2 for safety.
    a_ = cryptomath.find_mod_inverse(pow(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )

    a_ = (key_size, e_a, e_a, p)
    a_ = (key_size, d)

    return public_key, private_key

def a ( _UpperCAmelCase , _UpperCAmelCase ) -> None:
    """simple docstring"""
    if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
        print('\nWARNING:' )
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()

    a_ , a_ = generate_key(_UpperCAmelCase )
    print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(F'''{name}_pubkey.txt''' , 'w' ) as fo:
        fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )

    print(F'''Writing private key to file {name}_privkey.txt...''' )
    with open(F'''{name}_privkey.txt''' , 'w' ) as fo:
        fo.write(F'''{private_key[0]},{private_key[1]}''' )

def a ( ) -> None:
    """simple docstring"""
    print('Making key files...' )
    make_key_files('elgamal' , 2_0_4_8 )
    print('Key files generation successful' )

if __name__ == "__main__":
    main()
697
'''simple docstring'''
import math

def a ( _UpperCAmelCase ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def a ( _UpperCAmelCase = 1_0_0_0_1 ) -> int:
    """simple docstring"""
    try:
        a_ = int(_UpperCAmelCase )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    a_ = []
    a_ = 2
    while len(_UpperCAmelCase ) < nth:
        if is_prime(_UpperCAmelCase ):
            primes.append(_UpperCAmelCase )
            num += 1
        else:
            num += 1
    return primes[len(_UpperCAmelCase ) - 1]

if __name__ == "__main__":
    print(f'''{solution() = }''')
697
1
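A behavior sketch for the prime-search snippet in the row above, assuming the readable names `is_prime` and `solution` in place of the row's renamed `a`; with the default `nth=1_0_0_0_1` the search returns the 10001st prime, 104743 (Project Euler #7):

import math

def is_prime(number: int) -> bool:
    # Same 6k +/- 1 trial division as the row above, with readable names.
    if 1 < number < 4:
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

# solution() simply counts primes with is_prime until it has nth of them.
assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]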
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: """simple docstring""" if isinstance(_UpperCAmelCase , torch.Tensor ): return image elif isinstance(_UpperCAmelCase , PIL.Image.Image ): a_ = [image] if isinstance(image[0] , PIL.Image.Image ): a_ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] a_ = np.concatenate(_UpperCAmelCase , axis=0 ) a_ = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_5_5.0 a_ = image.transpose(0 , 3 , 1 , 2 ) a_ = 2.0 * image - 1.0 a_ = torch.from_numpy(_UpperCAmelCase ) elif isinstance(image[0] , torch.Tensor ): a_ = torch.cat(_UpperCAmelCase , dim=0 ) return image def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.9_9_9_5 ) -> Tuple: """simple docstring""" if not isinstance(_UpperCAmelCase , np.ndarray ): a_ = True a_ = va.device a_ = va.cpu().numpy() a_ = va.cpu().numpy() a_ = np.sum(va * va / (np.linalg.norm(_UpperCAmelCase ) * np.linalg.norm(_UpperCAmelCase )) ) if np.abs(_UpperCAmelCase ) > DOT_THRESHOLD: a_ = (1 - t) * va + t * va else: a_ = np.arccos(_UpperCAmelCase ) a_ = np.sin(_UpperCAmelCase ) a_ = theta_a * t a_ = np.sin(_UpperCAmelCase ) a_ = np.sin(theta_a - theta_t ) / sin_theta_a a_ = sin_theta_t / sin_theta_a a_ = sa * va + sa * va if inputs_are_torch: a_ = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase ) return va def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: """simple docstring""" a_ = F.normalize(_UpperCAmelCase , dim=-1 ) a_ = F.normalize(_UpperCAmelCase , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: """simple docstring""" for param in model.parameters(): a_ = value class _snake_case ( snake_case ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ) -> Dict: super().__init__() self.register_modules( vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , clip_model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , coca_model=UpperCAmelCase__ , coca_tokenizer=UpperCAmelCase__ , coca_transform=UpperCAmelCase__ , ) a_ = ( feature_extractor.size if isinstance(feature_extractor.size , UpperCAmelCase__ ) else feature_extractor.size['shortest_edge'] ) a_ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , UpperCAmelCase__ ) set_requires_grad(self.clip_model , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ = "auto" ) -> Tuple: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a_ = 
self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: self.enable_attention_slicing(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: set_requires_grad(self.vae , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: set_requires_grad(self.vae , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: set_requires_grad(self.unet , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: set_requires_grad(self.unet , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any: # get the original timestep using init_timestep a_ = min(int(num_inference_steps * strength ) , UpperCAmelCase__ ) a_ = max(num_inference_steps - init_timestep , 0 ) a_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ) -> str: if not isinstance(UpperCAmelCase__ , torch.Tensor ): raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}''' ) a_ = image.to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): a_ = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ ) ] a_ = torch.cat(UpperCAmelCase__ , dim=0 ) else: a_ = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a_ = 0.1_8_2_1_5 * init_latents a_ = init_latents.repeat_interleave(UpperCAmelCase__ , dim=0 ) a_ = randn_tensor(init_latents.shape , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ) # get latents a_ = self.scheduler.add_noise(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) a_ = init_latents return latents def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> List[str]: a_ = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): a_ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) a_ = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> str: a_ = self.feature_extractor.preprocess(UpperCAmelCase__ ) a_ = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() a_ = self.clip_model.get_image_features(UpperCAmelCase__ ) a_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase__ ) a_ = image_embeddings_clip.repeat_interleave(UpperCAmelCase__ , dim=0 ) return image_embeddings_clip @torch.enable_grad() def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) -> Union[str, Any]: a_ = latents.detach().requires_grad_() a_ = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) # predict the noise residual a_ = self.unet(UpperCAmelCase__ , UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): a_ = 
self.scheduler.alphas_cumprod[timestep] a_ = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf a_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 a_ = torch.sqrt(UpperCAmelCase__ ) a_ = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , UpperCAmelCase__ ): a_ = self.scheduler.sigmas[index] a_ = latents - sigma * noise_pred else: raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a_ = 1 / 0.1_8_2_1_5 * sample a_ = self.vae.decode(UpperCAmelCase__ ).sample a_ = (image / 2 + 0.5).clamp(0 , 1 ) a_ = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ ) a_ = self.normalize(UpperCAmelCase__ ).to(latents.dtype ) a_ = self.clip_model.get_image_features(UpperCAmelCase__ ) a_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase__ ) a_ = spherical_dist_loss(UpperCAmelCase__ , UpperCAmelCase__ ).mean() * clip_guidance_scale a_ = -torch.autograd.grad(UpperCAmelCase__ , UpperCAmelCase__ )[0] if isinstance(self.scheduler , UpperCAmelCase__ ): a_ = latents.detach() + grads * (sigma**2) a_ = noise_pred_original else: a_ = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = 512 , UpperCAmelCase__ = 512 , UpperCAmelCase__ = 0.6 , UpperCAmelCase__ = 50 , UpperCAmelCase__ = 7.5 , UpperCAmelCase__ = 1 , UpperCAmelCase__ = 0.0 , UpperCAmelCase__ = 100 , UpperCAmelCase__ = None , UpperCAmelCase__ = "pil" , UpperCAmelCase__ = True , UpperCAmelCase__ = 0.8 , UpperCAmelCase__ = 0.1 , UpperCAmelCase__ = 0.1 , ) -> Union[str, Any]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if isinstance(UpperCAmelCase__ , torch.Generator ) and batch_size > 1: a_ = [generator] + [None] * (batch_size - 1) a_ = [ ('model', self.coca_model is None), ('tokenizer', self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] a_ = [x[0] for x in coca_is_none if x[1]] a_ = ', '.join(UpperCAmelCase__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.''' F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) a_ = self.get_image_description(UpperCAmelCase__ ) if style_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.''' F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) a_ = self.get_image_description(UpperCAmelCase__ ) # get prompt text embeddings for content and style a_ = self.tokenizer( UpperCAmelCase__ , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors='pt' , ) a_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] a_ = self.tokenizer( UpperCAmelCase__ , padding='max_length' , 
max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors='pt' , ) a_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] a_ = slerp(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # duplicate text embeddings for each generation per prompt a_ = text_embeddings.repeat_interleave(UpperCAmelCase__ , dim=0 ) # set timesteps a_ = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) a_ = {} if accepts_offset: a_ = 1 self.scheduler.set_timesteps(UpperCAmelCase__ , **UpperCAmelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) a_ , a_ = self.get_timesteps(UpperCAmelCase__ , UpperCAmelCase__ , self.device ) a_ = timesteps[:1].repeat(UpperCAmelCase__ ) # Preprocess image a_ = preprocess(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) a_ = self.prepare_latents( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , text_embeddings.dtype , self.device , UpperCAmelCase__ ) a_ = preprocess(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) a_ = self.prepare_latents( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , text_embeddings.dtype , self.device , UpperCAmelCase__ ) a_ = slerp(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if clip_guidance_scale > 0: a_ = self.get_clip_image_embeddings(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = self.get_clip_image_embeddings(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = slerp( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. a_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: a_ = content_text_input.input_ids.shape[-1] a_ = self.tokenizer([''] , padding='max_length' , max_length=UpperCAmelCase__ , return_tensors='pt' ) a_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt a_ = uncond_embeddings.repeat_interleave(UpperCAmelCase__ , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a_ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
a_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8) a_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps a_ = torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device='cpu' , dtype=UpperCAmelCase__ ).to( self.device ) else: a_ = torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) a_ = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler a_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) a_ = {} if accepts_eta: a_ = eta # check if the scheduler accepts generator a_ = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: a_ = generator with self.progress_bar(total=UpperCAmelCase__ ): for i, t in enumerate(UpperCAmelCase__ ): # expand the latents if we are doing classifier free guidance a_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a_ = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) # predict the noise residual a_ = self.unet(UpperCAmelCase__ , UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ ).sample # perform classifier free guidance if do_classifier_free_guidance: a_ , a_ = noise_pred.chunk(2 ) a_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: a_ = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) a_ , a_ = self.cond_fn( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) # compute the previous noisy sample x_t -> x_t-1 a_ = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a_ = 1 / 0.1_8_2_1_5 * latents a_ = self.vae.decode(UpperCAmelCase__ ).sample a_ = (image / 2 + 0.5).clamp(0 , 1 ) a_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a_ = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=UpperCAmelCase__ , nsfw_content_detected=UpperCAmelCase__ )
697
'''simple docstring'''
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad

class _snake_case ( unittest.TestCase ):
    """simple docstring"""

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        a_ = 10

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = [1, 2, 3, 4]
        a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        a_ , a_ = process_story(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , [] )

    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        a_ = ''
        a_ , a_ = process_story(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , [] )
        self.assertEqual(UpperCAmelCase__ , [] )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        a_ , a_ = process_story(UpperCAmelCase__ )
        a_ = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

        a_ = ['It was the best of times.']
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        a_ = torch.tensor([1, 2, 3, 4] )
        a_ = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )

    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        a_ = 101
        a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
        np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
697
1
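A worked illustration of the `truncate_or_pad` contract that the test in the row above exercises: sequences shorter than `block_size` are right-padded with the pad symbol, longer ones are truncated. The real helper lives in `utils_summarization` (not shown here), so this reference version is an assumption that matches only the asserted cases:

def truncate_or_pad(sequence: list, block_size: int, pad_token_id: int) -> list:
    # Truncate to block_size, or pad on the right up to block_size.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))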
'''simple docstring'''
import numpy as np
from PIL import Image

def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> np.ndarray:
    """simple docstring"""
    a_ = np.array(_UpperCAmelCase )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    a_ = 0
    a_ = 0
    a_ = 0
    a_ = 0

    # compute the shape of the output matrix
    a_ = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    a_ = np.zeros((maxpool_shape, maxpool_shape) )

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            a_ = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        a_ = 0
        a_ = 0

    return updated_arr

def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> np.ndarray:
    """simple docstring"""
    a_ = np.array(_UpperCAmelCase )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    a_ = 0
    a_ = 0
    a_ = 0
    a_ = 0

    # compute the shape of the output matrix
    a_ = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    a_ = np.zeros((avgpool_shape, avgpool_shape) )

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            a_ = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        a_ = 0
        a_ = 0

    return updated_arr

# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    __lowerCAmelCase =Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
697
'''simple docstring'''
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin

__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"

class _snake_case ( unittest.TestCase , snake_case ):
    """simple docstring"""

    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        a_ = load_tool('text-question-answering' )
        self.tool.setup()
        a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
697
1
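A worked example for the pooling snippet in the row above, assuming the readable names `maxpooling`/`avgpooling` for the two functions the row renames to `a`; with `size=2` and `stride=2` a 4x4 input collapses to a 2x2 output:

import numpy as np

arr = np.array([[ 1,  2,  3,  4],
                [ 5,  6,  7,  8],
                [ 9, 10, 11, 12],
                [13, 14, 15, 16]])
# max pooling takes the maximum of each 2x2 window:
# -> [[ 6,  8],
#     [14, 16]]
# average pooling takes the mean of each window, cast to int as in the snippet:
# -> [[ 3,  5],
#     [11, 13]]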
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _snake_case : """simple docstring""" @staticmethod def __SCREAMING_SNAKE_CASE ( *UpperCAmelCase__ , **UpperCAmelCase__ ) -> str: pass @is_pipeline_test @require_vision @require_timm @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" _UpperCamelCase = MODEL_FOR_OBJECT_DETECTION_MAPPING def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = ObjectDetectionPipeline(model=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> str: a_ = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 ) self.assertGreater(len(UpperCAmelCase__ ) , 0 ) for detected_object in outputs: self.assertEqual( UpperCAmelCase__ , { 'score': ANY(UpperCAmelCase__ ), 'label': ANY(UpperCAmelCase__ ), 'box': {'xmin': ANY(UpperCAmelCase__ ), 'ymin': ANY(UpperCAmelCase__ ), 'xmax': ANY(UpperCAmelCase__ ), 'ymax': ANY(UpperCAmelCase__ )}, } , ) import datasets a_ = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) a_ = [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] a_ = object_detector(UpperCAmelCase__ , threshold=0.0 ) self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for outputs in batch_outputs: self.assertGreater(len(UpperCAmelCase__ ) , 0 ) for detected_object in outputs: self.assertEqual( UpperCAmelCase__ , { 'score': ANY(UpperCAmelCase__ ), 'label': ANY(UpperCAmelCase__ ), 'box': {'xmin': ANY(UpperCAmelCase__ ), 'ymin': ANY(UpperCAmelCase__ ), 'xmax': ANY(UpperCAmelCase__ ), 'ymax': ANY(UpperCAmelCase__ )}, } , ) @require_tf @unittest.skip('Object detection not implemented in TF' ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: pass @require_torch def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = 'hf-internal-testing/tiny-detr-mobilenetsv3' a_ = AutoModelForObjectDetection.from_pretrained(UpperCAmelCase__ ) a_ = AutoFeatureExtractor.from_pretrained(UpperCAmelCase__ ) a_ = ObjectDetectionPipeline(model=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) a_ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ] , ) a_ = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 
0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ = 'facebook/detr-resnet-50' a_ = AutoModelForObjectDetection.from_pretrained(UpperCAmelCase__ ) a_ = AutoFeatureExtractor.from_pretrained(UpperCAmelCase__ ) a_ = ObjectDetectionPipeline(model=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) a_ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ] , ) a_ = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = 'facebook/detr-resnet-50' a_ = pipeline('object-detection' , model=UpperCAmelCase__ ) a_ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ] , ) a_ = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {'score': 0.9_9_8_2, 'label': 
'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ] , ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = 0.9_9_8_5 a_ = 'facebook/detr-resnet-50' a_ = pipeline('object-detection' , model=UpperCAmelCase__ ) a_ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=UpperCAmelCase__ ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ] , ) @require_torch @require_pytesseract @slow def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = 'Narsil/layoutlmv3-finetuned-funsd' a_ = 0.9_9_9_3 a_ = pipeline('object-detection' , model=UpperCAmelCase__ , threshold=UpperCAmelCase__ ) a_ = object_detector( 'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, ] , )
697
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =[ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
697
1
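# A minimal, hedged sketch of driving the object-detection pipeline exercised
# by the tests above. The task name, checkpoint and image URL all come from the
# tests themselves; running it assumes torch and timm are installed, and the
# exact scores depend on the checkpoint.
from transformers import pipeline

object_detector = pipeline('object-detection', model='facebook/detr-resnet-50')
for prediction in object_detector('http://images.cocodataset.org/val2017/000000039769.jpg'):
    # each prediction is {'score': float, 'label': str, 'box': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}
    print(prediction['label'], round(prediction['score'], 4), prediction['box'])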
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase =get_tests_dir("fixtures/test_sentencepiece.model") if is_sentencepiece_available(): import sentencepiece as sp __lowerCAmelCase =5 __lowerCAmelCase =10 @require_sentencepiece @require_tokenizers class _snake_case ( snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = SpeechaTextTokenizer _UpperCamelCase = False _UpperCamelCase = True def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: super().setUp() a_ = sp.SentencePieceProcessor() spm_model.Load(UpperCAmelCase__ ) a_ = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(UpperCAmelCase__ ) )] a_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) a_ = Path(self.tmpdirname ) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['spm_file'] ) a_ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = '<pad>' a_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(UpperCAmelCase__ ) , 1001 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) a_ = tokenizer.tokenize('This is a test' ) self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [289, 50, 14, 174, 386] , ) a_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) a_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) a_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Dict: # fmt: off a_ = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class _snake_case ( unittest.TestCase ): """simple docstring""" _UpperCamelCase = "valhalla/s2t_mustc_multilinguial_medium" _UpperCamelCase = "C'est trop cool" _UpperCamelCase = "Esto es genial" @classmethod def __SCREAMING_SNAKE_CASE ( cls ) -> str: a_ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def __SCREAMING_SNAKE_CASE ( self ) -> Any: self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: self.assertEqual(self.tokenizer.vocab_size , 1_0000 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids ) a_ = [ES_CODE, 4, 1601, 47, 7647, 2] a_ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) a_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = 'fr' a_ = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , UpperCAmelCase__ ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) a_ = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
697
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__lowerCAmelCase =logging.get_logger(__name__)

__lowerCAmelCase ={
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = "vit"

    def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
        super().__init__(**UpperCAmelCase__ )
        a_ = hidden_size
        a_ = num_hidden_layers
        a_ = num_attention_heads
        a_ = intermediate_size
        a_ = hidden_act
        a_ = hidden_dropout_prob
        a_ = attention_probs_dropout_prob
        a_ = initializer_range
        a_ = layer_norm_eps
        a_ = image_size
        a_ = patch_size
        a_ = num_channels
        a_ = qkv_bias
        a_ = encoder_stride


class _snake_case ( snake_case ):
    """simple docstring"""

    _UpperCamelCase = version.parse("1.11" )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> float:
        return 1e-4
697
1
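# A quick worked consequence of the ViT configuration above: with the defaults
# shown (image_size=224, patch_size=16), the encoder receives a fixed number of
# patch tokens; this is plain arithmetic, not documented API.
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
print(num_patches)      # 196 patches
print(num_patches + 1)  # 197 tokens once the [CLS] token is added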
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class _snake_case ( snake_case ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> str: super().__init__( features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , ) a_ = Generator( cache_dir=UpperCAmelCase__ , features=UpperCAmelCase__ , generator=UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: # Build iterable dataset if self.streaming: a_ = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: a_ = None a_ = None a_ = None a_ = None self.builder.download_and_prepare( download_config=UpperCAmelCase__ , download_mode=UpperCAmelCase__ , verification_mode=UpperCAmelCase__ , base_path=UpperCAmelCase__ , num_proc=self.num_proc , ) a_ = self.builder.as_dataset( split='train' , verification_mode=UpperCAmelCase__ , in_memory=self.keep_in_memory ) return dataset
697
'''simple docstring'''
def solution(length: int = 50) -> int:
    """simple docstring"""
    # ways_number[i] counts the ways to fill a row of length i with red blocks
    # of length >= 3, separated by at least one empty square (cf. Project Euler 114).
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
697
1
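# The GeneratorDatasetInputStream above wraps the `datasets` generator builder;
# a hedged sketch of the underlying entry point (assumes a datasets release
# that provides Dataset.from_generator; the row schema below is illustrative).
from datasets import Dataset

def sample_rows():
    for i in range(3):
        yield {'id': i, 'text': f'row {i}'}

ds = Dataset.from_generator(sample_rows)
print(ds[0])  # {'id': 0, 'text': 'row 0'}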
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F'''{func.__name__}({value})'''
        timing = timeit(F'''__main__.{call}''', setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F'''{call:38} -- {timing:.4f} seconds''')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
697
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """simple docstring"""
    print('Truth Table of NOR Gate:')
    print('| Input 1 | Input 2 | Output |')
    print(F'''| 0 | 0 | {nor_gate(0, 0)} |''')
    print(F'''| 0 | 1 | {nor_gate(0, 1)} |''')
    print(F'''| 1 | 0 | {nor_gate(1, 0)} |''')
    print(F'''| 1 | 1 | {nor_gate(1, 1)} |''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
697
1
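# The NOR gate above returns int(input_1 == input_2 == 0); a quick self-check
# against the direct boolean definition NOR(a, b) = NOT (a OR b):
def nor_reference(input_1: int, input_2: int) -> int:
    return int(not (input_1 or input_2))

for x in (0, 1):
    for y in (0, 1):
        assert nor_reference(x, y) == int(x == y == 0)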
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase ={ "configuration_xlm_roberta": [ "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig", "XLMRobertaOnnxConfig", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =["XLMRobertaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =["XLMRobertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =[ "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaForCausalLM", "XLMRobertaForMaskedLM", "XLMRobertaForMultipleChoice", "XLMRobertaForQuestionAnswering", "XLMRobertaForSequenceClassification", "XLMRobertaForTokenClassification", "XLMRobertaModel", "XLMRobertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =[ "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMRobertaForCausalLM", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForMultipleChoice", "TFXLMRobertaForQuestionAnswering", "TFXLMRobertaForSequenceClassification", "TFXLMRobertaForTokenClassification", "TFXLMRobertaModel", "TFXLMRobertaPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =[ "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxXLMRobertaForMaskedLM", "FlaxXLMRobertaForCausalLM", "FlaxXLMRobertaForMultipleChoice", "FlaxXLMRobertaForQuestionAnswering", "FlaxXLMRobertaForSequenceClassification", "FlaxXLMRobertaForTokenClassification", "FlaxXLMRobertaModel", "FlaxXLMRobertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
697
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def a ( _UpperCAmelCase ) -> int: """simple docstring""" return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class _snake_case ( snake_case ): """simple docstring""" @staticmethod def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str: a_ = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' ) download_parser.add_argument( '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , ) download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' ) download_parser.set_defaults(func=UpperCAmelCase__ ) def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = model a_ = cache a_ = force a_ = trust_remote_code def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
697
1
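# Both __init__ files above follow the same lazy-import recipe: declare an
# import-structure mapping, guard optional backends, then defer the real
# imports to attribute access. A stripped, hedged sketch of the idea (the
# class below is illustrative, not the real transformers _LazyModule):
import importlib
from types import ModuleType

class _MiniLazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the owning submodule only when the attribute is first touched
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

demo = _MiniLazyModule('demo', {'math': ['sqrt'], 'json': ['dumps']})
print(demo.sqrt(9.0))  # 3.0 -- `math` was imported lazily, on first access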
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """simple docstring"""
    # Sum the even-valued Fibonacci terms not exceeding n.
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f'''{solution() = }''')
697
'''simple docstring'''
def title_to_number(column_title: str) -> int:
    """simple docstring"""
    # Convert an Excel-style column title (e.g. 'AB') to its column number,
    # treating the title as a base-26 numeral with digits A=1 ... Z=26.
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
697
1
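# Cross-check for the even-Fibonacci sum above: in the sequence 1, 2, 3, 5, 8,
# ... every third term is even, so stepping three terms at a time must agree
# with filtering on fib[j] % 2 == 0 (4613732 is the well-known result for the
# default 4_000_000 limit):
def even_fib_sum(limit: int) -> int:
    total, a, b = 0, 1, 2
    while b <= limit:
        total += b
        for _ in range(3):  # advance three steps; every third term is even
            a, b = b, a + b
    return total

print(even_fib_sum(4_000_000))  # 4613732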
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowerCAmelCase ={"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase =["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
697
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCAmelCase =logging.get_logger(__name__) class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]: super().__init__( feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , ) a_ = spectrogram_length a_ = num_channels a_ = patch_size a_ = feature_size // self.patch_size[1] a_ = n_fft a_ = sampling_rate // hop_length_to_sampling_rate a_ = sampling_rate a_ = padding_value a_ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray: a_ = spectrogram( UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , ) a_ = log_spec[:, :-1] a_ = log_spec - 2_0.0 a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) a_ = is_batched_numpy or ( isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ): a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa ) elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a_ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis a_ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , UpperCAmelCase__ ): a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask a_ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: a_ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] a_ = np.array(UpperCAmelCase__ ).astype(np.floataa ) # convert into correct format for padding a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) a_ = padded_audio_features * self.padding_value for i in range(len(UpperCAmelCase__ ) ): a_ = audio_features[i] a_ = feature # return as BatchFeature if return_attention_mask: a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: a_ = {'audio_values': padded_audio_features} a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) return encoded_inputs
697
1
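# The padding logic in the feature extractor above is plain patch arithmetic;
# a hedged recreation with the defaults shown (feature_size=128,
# patch_size=[16, 16]; the 100-frame spectrogram length is illustrative):
from math import ceil

feature_size, patch_size = 128, (16, 16)
freq_len = feature_size // patch_size[1]  # 8 patches along the mel axis
num_time_frames = 100
num_patches = ceil(num_time_frames / patch_size[0]) * freq_len
print(num_patches)  # 56 audio patches for this clip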
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features __lowerCAmelCase =logging.get_logger(__name__) __lowerCAmelCase =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) __lowerCAmelCase =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _snake_case : """simple docstring""" _UpperCamelCase = field( default=snake_case , metadata={"help": "Model type selected in the list: " + ", ".join(snake_case )} ) _UpperCamelCase = field( default=snake_case , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} ) _UpperCamelCase = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _UpperCamelCase = field( default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) _UpperCamelCase = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) _UpperCamelCase = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) _UpperCamelCase = field( default=snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} ) _UpperCamelCase = field( default=snake_case , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} ) _UpperCamelCase = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) _UpperCamelCase = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) _UpperCamelCase = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) _UpperCamelCase = field(default=1 , metadata={"help": "multiple threads for converting example to features"} ) class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = "train" _UpperCamelCase = "dev" class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = 42 _UpperCamelCase = 42 _UpperCamelCase = 42 _UpperCamelCase = 42 def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = Split.train , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = "pt" , ) -> Optional[int]: a_ = args a_ = is_language_sensitive a_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): try: a_ = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) a_ = mode # Load data features from cache or dataset file a_ = 'v2' if args.version_2_with_negative else 'v1' a_ = os.path.join( cache_dir if cache_dir is not None else args.data_dir , 
F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. a_ = cached_features_file + '.lock' with FileLock(UpperCAmelCase__ ): if os.path.exists(UpperCAmelCase__ ) and not args.overwrite_cache: a_ = time.time() a_ = torch.load(UpperCAmelCase__ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. a_ = self.old_features['features'] a_ = self.old_features.get('dataset' , UpperCAmelCase__ ) a_ = self.old_features.get('examples' , UpperCAmelCase__ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: a_ = self.processor.get_dev_examples(args.data_dir ) else: a_ = self.processor.get_train_examples(args.data_dir ) a_ , a_ = squad_convert_examples_to_features( examples=self.examples , tokenizer=UpperCAmelCase__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=UpperCAmelCase__ , ) a_ = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , UpperCAmelCase__ , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> List[Any]: return len(self.features ) def __getitem__( self , UpperCAmelCase__ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset a_ = self.features[i] a_ = torch.tensor(feature.input_ids , dtype=torch.long ) a_ = torch.tensor(feature.attention_mask , dtype=torch.long ) a_ = torch.tensor(feature.token_type_ids , dtype=torch.long ) a_ = torch.tensor(feature.cls_index , dtype=torch.long ) a_ = torch.tensor(feature.p_mask , dtype=torch.float ) a_ = torch.tensor(feature.is_impossible , dtype=torch.float ) a_ = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: a_ = torch.tensor(feature.start_position , dtype=torch.long ) a_ = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
697
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """simple docstring"""
    # Jaccard similarity: |A intersect B| / |A union B| (or / (|A| + |B|) when
    # alternative_union is requested). Supports sets as well as lists/tuples.
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # list() so tuple inputs also work
            union = list(set_a) + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
697
1
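# Worked check for the Jaccard similarity above, reusing its sample sets: the
# intersection {c, d, e} has 3 elements and the union has 8, so the expected
# score is 3 / 8 = 0.375.
set_a = {'a', 'b', 'c', 'd', 'e'}
set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(len(set_a & set_b) / len(set_a | set_b))  # 0.375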
'''simple docstring'''
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # integer halving keeps the exponent exact for arbitrarily large n
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
697
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            'starting number must be\n an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
697
1
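# The driver code under binary_exponentiation computes a modular inverse via
# Fermat's little theorem (b ** (p - 2) mod p, valid because 701 is prime and
# does not divide 10). Python's three-argument pow gives an independent check:
p, b = 701, 10
inverse = pow(b, p - 2, p)
assert (b * inverse) % p == 1
print(inverse)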
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __lowerCAmelCase ="pt" elif is_tf_available(): __lowerCAmelCase ="tf" else: __lowerCAmelCase ="jax" class _snake_case ( snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = PerceiverTokenizer _UpperCamelCase = False def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: super().setUp() a_ = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase__ ) -> PerceiverTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=False , UpperCAmelCase__=20 , UpperCAmelCase__=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. a_ = [] for i in range(len(UpperCAmelCase__ ) ): try: a_ = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase__ ) except UnicodeDecodeError: pass toks.append((i, tok) ) a_ = list(filter(lambda UpperCAmelCase__ : re.match(R'^[ a-zA-Z]+$' , t[1] ) , UpperCAmelCase__ ) ) a_ = list(filter(lambda UpperCAmelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase__ ) , UpperCAmelCase__ ) ) if max_length is not None and len(UpperCAmelCase__ ) > max_length: a_ = toks[:max_length] if min_length is not None and len(UpperCAmelCase__ ) < min_length and len(UpperCAmelCase__ ) > 0: while len(UpperCAmelCase__ ) < min_length: a_ = toks + toks # toks_str = [t[1] for t in toks] a_ = [t[0] for t in toks] # Ensure consistency a_ = tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) if " " not in output_txt and len(UpperCAmelCase__ ) > 1: a_ = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase__ ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase__ ) ) if with_prefix_space: a_ = ' ' + output_txt a_ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) return output_txt, output_ids def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self.perceiver_tokenizer a_ = 'Unicode €.' 
a_ = tokenizer(UpperCAmelCase__ ) a_ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['input_ids'] , UpperCAmelCase__ ) # decoding a_ = tokenizer.decode(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , '[CLS]Unicode €.[SEP]' ) a_ = tokenizer('e è é ê ë' ) a_ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['input_ids'] , UpperCAmelCase__ ) # decoding a_ = tokenizer.decode(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self.perceiver_tokenizer a_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off a_ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on a_ = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) if FRAMEWORK != "jax": a_ = list(batch.input_ids.numpy()[0] ) else: a_ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = self.perceiver_tokenizer a_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] a_ = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , UpperCAmelCase__ ) self.assertIn('attention_mask' , UpperCAmelCase__ ) self.assertNotIn('decoder_input_ids' , UpperCAmelCase__ ) self.assertNotIn('decoder_attention_mask' , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = self.perceiver_tokenizer a_ = [ 'Summary of the text.', 'Another summary.', ] a_ = tokenizer( text_target=UpperCAmelCase__ , max_length=32 , padding='max_length' , truncation=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: # safety check on max_len default value so we are sure the test works a_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test a_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc a_ = tempfile.mkdtemp() a_ = ' He is very happy, UNwant\u00E9d,running' a_ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) tokenizer.save_pretrained(UpperCAmelCase__ ) a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase__ ) a_ = after_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) shutil.rmtree(UpperCAmelCase__ ) a_ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc a_ = tempfile.mkdtemp() a_ = ' He is very 
happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) a_ = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) a_ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) tokenizer.save_pretrained(UpperCAmelCase__ ) a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase__ ) a_ = after_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCAmelCase__ ) with open(os.path.join(UpperCAmelCase__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: a_ = json.load(UpperCAmelCase__ ) with open(os.path.join(UpperCAmelCase__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: a_ = json.load(UpperCAmelCase__ ) a_ = [F'''<extra_id_{i}>''' for i in range(125 )] a_ = added_tokens_extra_ids + [ 'an_additional_special_token' ] a_ = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(UpperCAmelCase__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) with open(os.path.join(UpperCAmelCase__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files a_ = tokenizer_class.from_pretrained( UpperCAmelCase__ , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained a_ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCAmelCase__ )] a_ = tokenizer_class.from_pretrained( UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '�' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: pass def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: pass def __SCREAMING_SNAKE_CASE ( self ) -> str: pass def __SCREAMING_SNAKE_CASE ( self ) -> str: # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens a_ = self.get_tokenizers(fast=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): a_ = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] a_ = tokenizer.convert_tokens_to_string(UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
697
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class _snake_case ( snake_case ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> str: super().__init__( features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , ) a_ = Generator( cache_dir=UpperCAmelCase__ , features=UpperCAmelCase__ , generator=UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: # Build iterable dataset if self.streaming: a_ = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: a_ = None a_ = None a_ = None a_ = None self.builder.download_and_prepare( download_config=UpperCAmelCase__ , download_mode=UpperCAmelCase__ , verification_mode=UpperCAmelCase__ , base_path=UpperCAmelCase__ , num_proc=self.num_proc , ) a_ = self.builder.as_dataset( split='train' , verification_mode=UpperCAmelCase__ , in_memory=self.keep_in_memory ) return dataset
697
1
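# The Perceiver tokenizer tested above works on raw UTF-8 bytes plus a handful
# of special tokens. From the expected IDs in the tests one can infer a byte
# offset of 6 (e.g. 'U' -> 85 + 6 = 91) with 4 acting as [CLS]; treat that
# offset as an observation from the test data, not documented API:
text = 'Unicode'
ids = [4] + [byte + 6 for byte in text.encode('utf-8')]
print(ids)  # [4, 91, 116, 111, 105, 117, 106, 107] -- matches the test expectation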
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """simple docstring"""
    # Jaccard similarity: |A intersect B| / |A union B| (or / (|A| + |B|) when
    # alternative_union is requested). Supports sets as well as lists/tuples.
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # list() so tuple inputs also work
            union = list(set_a) + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
697
'''simple docstring'''
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal')

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
697
1
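# Each Koch iteration replaces every segment with four shorter ones, so the
# vector list above grows as 3 * 4**n segments (3 * 4**n + 1 points, since the
# initial triangle is stored as 4 points with the first repeated at the end):
for n in range(6):
    print(n, 3 * 4**n + 1)  # 4, 13, 49, 193, 769, 3073 points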
'''simple docstring''' import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) __lowerCAmelCase =logging.getLogger() __lowerCAmelCase =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _snake_case ( snake_case ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> Optional[int]: os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) a_ = {'source': 'What is love ?', 'target': 'life'} a_ = {'train': 12, 'val': 2, 'test': 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: a_ = '\n'.join([contents[field]] * n_lines[split] ) with open(os.path.join(UpperCAmelCase__ , F'''{split}.{field}''' ) , 'w' ) as f: f.write(UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = "pytorch" ) -> str: a_ = self.get_auto_remove_tmp_dir() a_ = os.path.join(UpperCAmelCase__ , 'output' ) a_ = os.path.join(UpperCAmelCase__ , 'data' ) self._create_dummy_data(data_dir=UpperCAmelCase__ ) a_ = F''' --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ '''.split() if gpus > 0: testargs.append(F'''--gpus={gpus}''' ) if is_apex_available(): testargs.append('--fp16' ) else: testargs.append('--gpus=0' ) testargs.append('--distributed_backend=ddp_cpu' ) testargs.append('--num_processes=2' ) a_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(UpperCAmelCase__ , env=self.get_env() ) a_ = os.path.join(UpperCAmelCase__ , 'metrics.json' ) with open(UpperCAmelCase__ ) as f: a_ = json.load(UpperCAmelCase__ ) return result @require_torch_gpu def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 ) @require_torch_multi_gpu def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 ) @require_torch_gpu @require_ray def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = self._run_finetune(gpus=1 , distributed_retriever='ray' ) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 ) @require_torch_multi_gpu @require_ray def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = self._run_finetune(gpus=1 , distributed_retriever='ray' ) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
697
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
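# Illustration (an addition, not part of the original script): given BERT
# tokens for a sentence and the LTP-derived word set, `add_sub_symbol` rewrites
# the continuation characters of each whole Chinese word with a '##' prefix,
# and `prepare_ref` then records their positions so whole-word masking can mask
# a full word at once. The tokens below are made up for the example.
tokens = ["[CLS]", "我", "爱", "北", "京", "[SEP]"]
marked = add_sub_symbol(tokens, {"北京"})
assert marked == ["[CLS]", "我", "爱", "北", "##京", "[SEP]"]
# prepare_ref would save position 4 ('##京') for this sentence.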
697
1
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
697
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
697
1
def bead_sort(sequence: list) -> list:
    """Bead sort only works for sequences of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
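# Illustration (an addition): when rod_upper > rod_lower, transferring the
# difference in both directions is equivalent to swapping the two adjacent
# values, so each outer pass acts like one bubble-sort pass; n passes suffice,
# giving O(n**2) behavior for this list-based variant. Property check against
# the built-in sort:
import random

data = random.sample(range(100), 10)
assert bead_sort(list(data)) == sorted(data)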
697
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
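# Usage sketch (an addition, runnable with transformers installed rather than
# inside the library tree): the defaults above mirror the
# SCUT-DLVCLab/lilt-roberta-en-base hyperparameters; `channel_shrink_ratio`
# and `max_2d_position_embeddings` are the LiLT-specific layout parameters.
from transformers import LiltConfig

config = LiltConfig()
assert config.hidden_size == 768 and config.channel_shrink_ratio == 4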
697
1
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename

URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
697
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
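# Usage sketch (an addition): 0 marks an open cell and 1 a blocked one; the
# solver backtracks from the top-left corner to the bottom-right corner and
# prints the 0/1 path matrix when a path exists.
maze = [
    [0, 1, 0],
    [0, 0, 0],
    [1, 0, 0],
]
assert solve_maze(maze)  # prints [1, 0, 0] / [1, 1, 0] / [0, 1, 1]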
697
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
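# Aside (an addition, not part of the package): `_LazyModule` defers the heavy
# submodule imports above until an attribute is first accessed. A minimal
# stand-alone sketch of the same idea for a package __init__.py, using
# module-level __getattr__ (PEP 562); the `_LAZY` mapping here is illustrative.
import importlib

_LAZY = {"sqrt": "math", "dataclass": "dataclasses"}  # attribute -> providing module


def __getattr__(name):
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(name)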
697
'''simple docstring''' __lowerCAmelCase ={ "meter": "m", "kilometer": "km", "megametre": "Mm", "gigametre": "Gm", "terametre": "Tm", "petametre": "Pm", "exametre": "Em", "zettametre": "Zm", "yottametre": "Ym", } # Exponent of the factor(meter) __lowerCAmelCase ={ "m": 0, "km": 3, "Mm": 6, "Gm": 9, "Tm": 12, "Pm": 15, "Em": 18, "Zm": 21, "Ym": 24, } def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float: """simple docstring""" a_ = from_type.lower().strip('s' ) a_ = to_type.lower().strip('s' ) a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase ) a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase ) if from_sanitized not in METRIC_CONVERSION: a_ = ( F'''Invalid \'from_type\' value: {from_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}''' ) raise ValueError(_UpperCAmelCase ) if to_sanitized not in METRIC_CONVERSION: a_ = ( F'''Invalid \'to_type\' value: {to_type!r}.\n''' F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}''' ) raise ValueError(_UpperCAmelCase ) a_ = METRIC_CONVERSION[from_sanitized] a_ = METRIC_CONVERSION[to_sanitized] a_ = 1 if from_exponent > to_exponent: a_ = from_exponent - to_exponent else: a_ = -(to_exponent - from_exponent) return value * pow(1_0 , _UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
697
1
import numpy as np
import torch
from imwatermark import WatermarkEncoder

# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # map [-1, 1] floats to [0, 255] pixel scale, NCHW -> NHWC
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        # map back to [-1, 1]
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
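# Note (an addition): `apply_watermark` expects images in [-1, 1] (NCHW); the
# two affine maps above are exact inverses, up to the uint8 round-trip through
# the encoder. A scalar check of the round-trip:
x = 0.5
p = 255 * (x / 2 + 0.5)  # -> 191.25 on the [0, 255] pixel scale
assert abs(2 * (p / 255 - 0.5) - x) < 1e-9  # back to [-1, 1]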
697
import unittest

from transformers import DonutProcessor

DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
697
1
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
697
'''simple docstring''' import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class _snake_case : """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]: a_ = parent a_ = out_indices if out_indices is not None else [4] a_ = stage_names a_ = out_features a_ = backbone a_ = batch_size a_ = image_size a_ = num_channels a_ = use_pretrained_backbone a_ = is_training def __SCREAMING_SNAKE_CASE ( self ) -> str: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = self.get_config() return config, pixel_values def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = TimmBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): a_ = model(UpperCAmelCase__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = self.prepare_config_and_inputs() a_ , a_ = config_and_inputs a_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = (TimmBackbone,) if is_torch_available() else () _UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = TimmBackboneModelTester(self ) a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ = 'resnet18' a_ = 'microsoft/resnet-18' a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) 
self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] ) a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def __SCREAMING_SNAKE_CASE ( self ) -> str: pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass @unittest.skip('Safetensors is not supported by timm.' ) def __SCREAMING_SNAKE_CASE ( self ) -> str: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __SCREAMING_SNAKE_CASE ( self ) -> int: pass def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) a_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = True a_ = self.has_attentions # no need to test all models as different heads yield the same functionality a_ = self.all_model_classes[0] a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = model(**UpperCAmelCase__ ) a_ = outputs[0][-1] # Encoder-/Decoder-only models a_ = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: a_ = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=UpperCAmelCase__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None a_ = copy.deepcopy(UpperCAmelCase__ ) a_ = None a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights a_ = copy.deepcopy(UpperCAmelCase__ ) a_ = False a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(**UpperCAmelCase__ )
697
1
'''simple docstring''' import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase =logging.get_logger(__name__) def a ( _UpperCAmelCase ) -> Optional[Any]: """simple docstring""" a_ = SwinConfig.from_pretrained( 'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) a_ = MaskFormerConfig(backbone_config=_UpperCAmelCase ) a_ = 'huggingface/label-files' if "ade20k-full" in model_name: # this should be ok a_ = 8_4_7 a_ = 'maskformer-ade20k-full-id2label.json' elif "ade" in model_name: # this should be ok a_ = 1_5_0 a_ = 'ade20k-id2label.json' elif "coco-stuff" in model_name: # this should be ok a_ = 1_7_1 a_ = 'maskformer-coco-stuff-id2label.json' elif "coco" in model_name: # TODO a_ = 1_3_3 a_ = 'coco-panoptic-id2label.json' elif "cityscapes" in model_name: # this should be ok a_ = 1_9 a_ = 'cityscapes-id2label.json' elif "vistas" in model_name: # this should be ok a_ = 6_5 a_ = 'mapillary-vistas-id2label.json' a_ = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) a_ = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} return config def a ( _UpperCAmelCase ) -> Dict: """simple docstring""" a_ = [] # stem # fmt: off rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', 
F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') ) rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', 
F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') ) # heads on top rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') ) rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') ) rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') ) for i in range(3 ): rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int: """simple docstring""" a_ = 
dct.pop(_UpperCAmelCase ) a_ = val def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: """simple docstring""" a_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): a_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) a_ = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) a_ = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[:dim, :] a_ = in_proj_bias[: dim] a_ = in_proj_weight[ dim : dim * 2, : ] a_ = in_proj_bias[ dim : dim * 2 ] a_ = in_proj_weight[ -dim :, : ] a_ = in_proj_bias[-dim :] # fmt: on def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int: """simple docstring""" a_ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) a_ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[: hidden_size, :] a_ = in_proj_bias[:config.hidden_size] a_ = in_proj_weight[hidden_size : hidden_size * 2, :] a_ = in_proj_bias[hidden_size : hidden_size * 2] a_ = in_proj_weight[-hidden_size :, :] a_ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) a_ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) a_ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a_ = in_proj_weight[: hidden_size, :] a_ = in_proj_bias[:config.hidden_size] a_ = in_proj_weight[hidden_size : hidden_size * 2, :] a_ = in_proj_bias[hidden_size : hidden_size * 2] a_ = in_proj_weight[-hidden_size :, :] a_ = in_proj_bias[-hidden_size :] # fmt: on def a ( ) -> torch.Tensor: """simple docstring""" a_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' a_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) return im @torch.no_grad() def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False ) -> Optional[Any]: """simple docstring""" a_ = get_maskformer_config(_UpperCAmelCase ) # load original state_dict with open(_UpperCAmelCase , 'rb' ) as f: a_ = pickle.load(_UpperCAmelCase ) a_ = data['model'] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys a_ = create_rename_keys(_UpperCAmelCase ) for src, dest in rename_keys: rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) read_in_swin_q_k_v(_UpperCAmelCase , config.backbone_config ) read_in_decoder_q_k_v(_UpperCAmelCase , _UpperCAmelCase ) # update to torch tensors for key, value in state_dict.items(): a_ = torch.from_numpy(_UpperCAmelCase ) # load 🤗 model a_ = MaskFormerForInstanceSegmentation(_UpperCAmelCase ) model.eval() for name, param in model.named_parameters(): print(_UpperCAmelCase , param.shape ) a_ , a_ = 
model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(_UpperCAmelCase ) == 0, F'''Unexpected keys: {unexpected_keys}''' # verify results a_ = prepare_img() if "vistas" in model_name: a_ = 6_5 elif "cityscapes" in model_name: a_ = 6_5_5_3_5 else: a_ = 2_5_5 a_ = True if 'ade' in model_name else False a_ = MaskFormerImageProcessor(ignore_index=_UpperCAmelCase , reduce_labels=_UpperCAmelCase ) a_ = image_processor(_UpperCAmelCase , return_tensors='pt' ) a_ = model(**_UpperCAmelCase ) print('Logits:' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": a_ = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: print('Pushing model and image processor to the hub...' ) model.push_to_hub(F'''nielsr/{model_name}''' ) image_processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="maskformer-swin-tiny-ade", type=str, help=("Name of the MaskFormer model you'd like to convert",), ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", type=str, help="Path to the original state dict (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __lowerCAmelCase =parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
697
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]: a_ = size if size is not None else {'height': 18, 'width': 18} a_ = parent a_ = batch_size a_ = num_channels a_ = image_size a_ = min_resolution a_ = max_resolution a_ = do_resize a_ = size a_ = apply_ocr def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _snake_case ( snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = LayoutLMvaImageProcessingTester(self ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: pass def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , UpperCAmelCase__ ) self.assertIsInstance(encoding.boxes , UpperCAmelCase__ ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in 
image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # Initialize image_processing a_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> int: # with apply_OCR = True a_ = LayoutLMvaImageProcessor() from datasets import load_dataset a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) a_ = Image.open(ds[0]['file'] ).convert('RGB' ) a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 
'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 
658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCAmelCase__ ) self.assertListEqual(encoding.boxes , UpperCAmelCase__ ) # with apply_OCR = False a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
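A minimal usage sketch of the image processor exercised above, assuming pytesseract is installed and using the class name exactly as the sample imports it; "document.png" is a placeholder file name:

from PIL import Image
from transformers import LayoutLMvaImageProcessor

image_processor = LayoutLMvaImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")  # placeholder input image
encoding = image_processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)      # (1, 3, 224, 224) with the default size
print(encoding.words, encoding.boxes)   # OCR'd words and their bounding boxes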
697
1
'''simple docstring'''

import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
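A worked check of the WordPiece behaviour the test above asserts: ids follow the order of the vocabulary file, so the expected ids can be recomputed by hand in pure Python, with no tokenizer needed:

vocab = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
tokens = ["un", "##want", "##ed", ",", "runn", "##ing"]
print([vocab.index(t) for t in tokens])  # [7, 4, 5, 10, 8, 9], as asserted above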
697
'''simple docstring'''

import math


def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """simple docstring"""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
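Two quick sanity checks of the helpers above; the 6th prime is 13, and the default argument reproduces the 10001st-prime problem:

assert is_prime(13) and not is_prime(15)
assert solution(6) == 13  # primes: 2, 3, 5, 7, 11, 13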
697
1
'''simple docstring'''


def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be\n an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
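An example run; note that the second argument is an inclusive upper bound on the counter, not a number of repetitions:

print(fizz_buzz(1, 15))
# 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz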
697
'''simple docstring''' import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _snake_case ( unittest.TestCase ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = 10 def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = [1, 2, 3, 4] a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' a_ , a_ = process_story(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , [] ) def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = '' a_ , a_ = process_story(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , [] ) self.assertEqual(UpperCAmelCase__ , [] ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) a_ , a_ = process_story(UpperCAmelCase__ ) a_ = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) a_ = ['It was the best of times.'] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a_ = torch.tensor([1, 2, 3, 4] ) a_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() ) def __SCREAMING_SNAKE_CASE ( self ) -> int: a_ = 101 a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ ) np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
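The padding and masking semantics those tests pin down, in one small standalone example; this assumes the same package context as the test for the relative import, and the positional argument order follows the calls in the tests:

import torch

from .utils_summarization import build_mask, truncate_or_pad

padded = truncate_or_pad([1, 2, 3], 6, 0)   # -> [1, 2, 3, 0, 0, 0]
mask = build_mask(torch.tensor(padded), 0)  # -> tensor([1, 1, 1, 0, 0, 0])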
697
1
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process __lowerCAmelCase =logging.getLogger(__name__) __lowerCAmelCase =list(MODEL_FOR_MASKED_LM_MAPPING.keys()) __lowerCAmelCase =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _snake_case : """simple docstring""" _UpperCamelCase = field( default=snake_case , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) _UpperCamelCase = field( default=snake_case , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(snake_case )} , ) _UpperCamelCase = field( default=snake_case , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) _UpperCamelCase = field( default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _UpperCamelCase = field( default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _UpperCamelCase = field( default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) _UpperCamelCase = field( default=snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) _UpperCamelCase = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) _UpperCamelCase = field( default=snake_case , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } , ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' ) @dataclass class _snake_case : """simple docstring""" _UpperCamelCase = field( default=snake_case , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) _UpperCamelCase = field( default=snake_case , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) _UpperCamelCase = field(default=snake_case , metadata={"help": "The input training data file (a text file)."} ) _UpperCamelCase = field( default=snake_case , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) _UpperCamelCase = field( default=snake_case , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , ) _UpperCamelCase = field( default=snake_case , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , ) _UpperCamelCase = field( default=snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} ) _UpperCamelCase = field( default=5 , metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" } , ) _UpperCamelCase = field( default=snake_case , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) } , ) _UpperCamelCase = field( default=snake_case , metadata={"help": "The number of processes to use for the preprocessing."} , ) _UpperCamelCase = field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) _UpperCamelCase = field( default=snake_case , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: if self.train_file is not None: a_ = self.train_file.split('.' )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: a_ = self.validation_file.split('.' )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: """simple docstring""" with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f: a_ = [json.loads(_UpperCAmelCase ) for line in f.read().splitlines() if (len(_UpperCAmelCase ) > 0 and not line.isspace())] assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) a_ = {c: dataset[c] for c in dataset.column_names} a_ = refs return Dataset.from_dict(_UpperCAmelCase ) def a ( ) -> List[str]: """simple docstring""" a_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a_ , a_ , a_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a_ , a_ , a_ = parser.parse_args_into_dataclasses() # Detecting last checkpoint. 
a_ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: a_ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , _UpperCAmelCase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. a_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): a_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , ) a_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , ) else: a_ = {} if data_args.train_file is not None: a_ = data_args.train_file if data_args.validation_file is not None: a_ = data_args.validation_file a_ = data_args.train_file.split('.' )[-1] if extension == "txt": a_ = 'text' a_ = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a_ = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: a_ = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase ) elif model_args.model_name_or_path: a_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase ) else: a_ = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) a_ = { 'cache_dir': model_args.cache_dir, 'use_fast': model_args.use_fast_tokenizer, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.tokenizer_name: a_ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase ) elif model_args.model_name_or_path: a_ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) if model_args.model_name_or_path: a_ = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) a_ = AutoModelForMaskedLM.from_config(_UpperCAmelCase ) model.resize_token_embeddings(len(_UpperCAmelCase ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: a_ = datasets['train'].column_names else: a_ = datasets['validation'].column_names a_ = 'text' if 'text' in column_names else column_names[0] a_ = 'max_length' if data_args.pad_to_max_length else False def tokenize_function(_UpperCAmelCase ): # Remove empty lines a_ = [line for line in examples['text'] if len(_UpperCAmelCase ) > 0 and not line.isspace()] return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length ) a_ = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: a_ = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: a_ = add_chinese_references( tokenized_datasets['validation'] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer a_ = data_args.train_ref_file or data_args.validation_ref_file if has_ref: a_ = False # Data collator # This one will take care of randomly masking the tokens. 
a_ = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer a_ = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: if last_checkpoint is not None: a_ = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): a_ = model_args.model_name_or_path else: a_ = None a_ = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) trainer.save_model() # Saves the tokenizer too for easy upload a_ = os.path.join(training_args.output_dir , 'train_results.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: logger.info('***** Train results *****' ) for key, value in sorted(train_result.metrics.items() ): logger.info(F''' {key} = {value}''' ) writer.write(F'''{key} = {value}\n''' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) ) # Evaluation a_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) a_ = trainer.evaluate() a_ = math.exp(eval_output['eval_loss'] ) a_ = perplexity a_ = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' ) if trainer.is_world_process_zero(): with open(_UpperCAmelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in sorted(results.items() ): logger.info(F''' {key} = {value}''' ) writer.write(F'''{key} = {value}\n''' ) return results def a ( _UpperCAmelCase ) -> Any: """simple docstring""" main() if __name__ == "__main__": main()
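A minimal sketch of the whole-word-masking collator that this script wires into the Trainer; the checkpoint name is just an example, and the exact masked positions are random:

from transformers import AutoTokenizer, DataCollatorForWholeWordMask

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
batch = collator([tokenizer("Hello world, this is a test.")])
print(batch["input_ids"].shape)         # batch of token ids
print((batch["labels"] != -100).sum())  # number of masked positions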
697
'''simple docstring'''

import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    """simple docstring"""

    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
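A standalone version of what the tests above exercise; the tool name comes from the tests, and the remote variant is omitted here:

from transformers import load_tool

qa_tool = load_tool("text-question-answering")
answer = qa_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
print(answer)  # "launched the BigScience Research Workshop", per the assertions above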
697
1
'''simple docstring'''

import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks) -> bool:
    """simple docstring"""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """simple docstring"""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """simple docstring"""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """simple docstring"""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
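A small sketch of applying the rules to a toy parameter tree; the leaf shapes are arbitrary, and the keys mimic the GPT-2-style module names the regexes above expect:

import numpy as np

toy_params = {
    "transformer": {"wte": {"embedding": np.zeros((50257, 768))}},
    "ln_f": {"scale": np.zeros((768,)), "bias": np.zeros((768,))},
}
spec = set_partitions(toy_params)
# FrozenDict mapping each leaf to a PartitionSpec (sharded) or None (replicated)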
697
'''simple docstring'''

from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
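Once the lazy module is registered, downstream code imports the symbols as if the submodules were loaded eagerly; a sketch, assuming torch is installed:

from transformers import FocalNetConfig, FocalNetModel

model = FocalNetModel(FocalNetConfig())  # randomly initialized backbone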
697
1
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: """simple docstring""" if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer a_ = flax_key_tuple[:-1] + ('weight',) a_ = torch.permute(_UpperCAmelCase , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase ): # linear layer a_ = flax_key_tuple[:-1] + ('weight',) a_ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: a_ = flax_key_tuple[:-1] + ('weight',) return flax_key_tuple, flax_tensor def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: """simple docstring""" if "metadata" in layer: a_ = layer.split('metadata' ) a_ = ''.join(split_layer[0] )[:-1] a_ = [tuple(('metadata' + split_layer[1]).split('/' ) )] elif "kvstore" in layer: a_ = layer.split('kvstore' ) a_ = ''.join(split_layer[0] )[:-1] a_ = [tuple(('kvstore' + split_layer[1]).split('/' ) )] else: a_ = layer.split('/' ) a_ = '/'.join(split_layer[:-1] ) a_ = (split_layer[-1],) if "kvstore/path" in layer: a_ = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: a_ = 'file' else: a_ = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int: """simple docstring""" a_ = rename_keys(_UpperCAmelCase ) a_ = {} for k, v in current_block.items(): a_ = v a_ = new_current_block torch.save(_UpperCAmelCase , _UpperCAmelCase ) def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = WEIGHTS_NAME ) -> str: """simple docstring""" a_ = convert_file_size_to_int(_UpperCAmelCase ) a_ = [] a_ = {} a_ = 0 a_ = 0 os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp: a_ = serialization.msgpack_restore(fp.read() )['optimizer']['target'] a_ = flatten_dict(_UpperCAmelCase , sep='/' ) a_ = {} for layer in checkpoint_info.keys(): a_ , a_ , a_ = get_key_and_tensorstore_dict( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if curr_real_layer_name in all_layers: a_ = content else: a_ = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file a_ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() a_ = torch.tensor(_UpperCAmelCase ) a_ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts a_ , a_ = rename_base_flax_keys(tuple(key.split('/' ) ) , _UpperCAmelCase ) a_ = '/'.join(_UpperCAmelCase ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: a_ = os.path.join( _UpperCAmelCase , weights_name.replace('.bin' , F'''-{len(_UpperCAmelCase )+1:05d}-of-???.bin''' ) ) rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase ) sharded_state_dicts.append(current_block.keys() ) del current_block a_ = {} a_ = 0 a_ = raw_weights.to(getattr(_UpperCAmelCase , _UpperCAmelCase ) ) current_block_size += weight_size total_size += weight_size # Add the last block a_ = os.path.join(_UpperCAmelCase , weights_name.replace('.bin' , F'''-{len(_UpperCAmelCase )+1:05d}-of-???.bin''' ) ) rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(_UpperCAmelCase ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index a_ = {} a_ = {} for idx, shard in enumerate(_UpperCAmelCase ): a_ = weights_name.replace( '.bin' , F'''-{idx+1:05d}-of-{len(_UpperCAmelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d} a_ = os.path.join(_UpperCAmelCase , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) a_ = shard for key in shard: a_ = shard_file # Add the metadata a_ = {'total_size': total_size} a_ = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , 'w' , encoding='utf-8' ) as f: a_ = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase ) + '\n' f.write(_UpperCAmelCase ) return metadata, index if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) __lowerCAmelCase =parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def a ( ) -> int: """simple docstring""" from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer a_ = SwitchTransformersConfig.from_pretrained('google/switch-base-8' ) config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' ) a_ = SwitchTransformersForConditionalGeneration.from_pretrained( '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' ) a_ = TaTokenizer.from_pretrained('t5-small' ) a_ = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.' a_ = tokenizer(_UpperCAmelCase , return_tensors='pt' ).input_ids a_ = model.generate(_UpperCAmelCase , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
697
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
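Instantiating the config and inspecting the ONNX metadata defined above; every value printed is a declared default:

config = ViTConfig()
print(config.hidden_size, config.num_hidden_layers, config.patch_size)  # 768 12 16
onnx_config = ViTOnnxConfig(config)
print(dict(onnx_config.inputs))
# {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}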
697
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __lowerCAmelCase =logging.get_logger(__name__) class _snake_case ( snake_case ): """simple docstring""" _UpperCamelCase = ["input_features", "attention_mask"] def __init__( self , UpperCAmelCase__=80 , UpperCAmelCase__=1_6000 , UpperCAmelCase__=0.0 , UpperCAmelCase__=10 , UpperCAmelCase__=25 , UpperCAmelCase__="hamming_window" , UpperCAmelCase__=3_2_7_6_8.0 , UpperCAmelCase__=0.9_7 , UpperCAmelCase__=1.0 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=False , **UpperCAmelCase__ , ) -> int: super().__init__(feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ ) a_ = feature_size a_ = sampling_rate a_ = padding_value a_ = hop_length a_ = win_length a_ = frame_signal_scale a_ = preemphasis_coeff a_ = mel_floor a_ = normalize_means a_ = normalize_vars a_ = win_function a_ = return_attention_mask a_ = win_length * sampling_rate // 1000 a_ = hop_length * sampling_rate // 1000 a_ = optimal_fft_length(self.sample_size ) a_ = (self.n_fft // 2) + 1 def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray: if self.win_function == "hamming_window": a_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCAmelCase__ ) else: a_ = window_function(window_length=self.sample_size , name=self.win_function ) a_ = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) a_ = spectrogram( one_waveform * self.frame_signal_scale , window=UpperCAmelCase__ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=UpperCAmelCase__ , preemphasis=self.preemphasis_coeff , mel_filters=UpperCAmelCase__ , mel_floor=self.mel_floor , log_mel='log' , ) return msfc_features.T def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int: # make sure we normalize float32 arrays if self.normalize_means: a_ = x[:input_length].mean(axis=0 ) a_ = np.subtract(UpperCAmelCase__ , UpperCAmelCase__ ) if self.normalize_vars: a_ = x[:input_length].std(axis=0 ) a_ = np.divide(UpperCAmelCase__ , UpperCAmelCase__ ) if input_length < x.shape[0]: a_ = padding_value # make sure array is in float32 a_ = x.astype(np.floataa ) return x def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[np.ndarray]: a_ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(UpperCAmelCase__ , UpperCAmelCase__ , self.padding_value ) for x, n in zip(UpperCAmelCase__ , UpperCAmelCase__ )] def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' 
{self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) a_ = is_batched_numpy or ( isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ): a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa ) elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a_ = [raw_speech] # extract fbank features a_ = [self._extract_mfsc_features(UpperCAmelCase__ ) for one_waveform in raw_speech] # convert into correct format for padding a_ = BatchFeature({'input_features': features} ) a_ = self.pad( UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) # make sure list is in array format a_ = padded_inputs.get('input_features' ) if isinstance(input_features[0] , UpperCAmelCase__ ): a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in input_features] a_ = padded_inputs.get('attention_mask' ) if attention_mask is not None: a_ = [np.asarray(UpperCAmelCase__ , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: a_ = ( np.array(UpperCAmelCase__ , dtype=np.intaa ) if self._get_padding_strategies(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) a_ = self.normalize( padded_inputs['input_features'] , attention_mask=UpperCAmelCase__ ) if return_tensors is not None: a_ = padded_inputs.convert_to_tensors(UpperCAmelCase__ ) return padded_inputs
697
'''simple docstring'''


def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
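The recurrence counts tilings of a row with red blocks of length at least three separated by at least one grey square (Project Euler 114); small cases check out by hand:

assert solution(3) == 2   # all grey, or one red block of length 3
assert solution(7) == 17  # the worked example from the problem statement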
697
1
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ) -> Optional[int]: """simple docstring""" if attention_mask is None: a_ = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class _snake_case : """simple docstring""" _UpperCamelCase = OPTConfig _UpperCamelCase = {} _UpperCamelCase = "gelu" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=13 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__=99 , UpperCAmelCase__=16 , UpperCAmelCase__=2 , UpperCAmelCase__=4 , UpperCAmelCase__=4 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=20 , UpperCAmelCase__=2 , UpperCAmelCase__=1 , UpperCAmelCase__=0 , UpperCAmelCase__=16 , UpperCAmelCase__=16 , ) -> str: a_ = parent a_ = batch_size a_ = seq_length a_ = is_training a_ = use_labels a_ = vocab_size a_ = hidden_size a_ = num_hidden_layers a_ = num_attention_heads a_ = intermediate_size a_ = hidden_act a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = max_position_embeddings a_ = eos_token_id a_ = pad_token_id a_ = bos_token_id a_ = embed_dim a_ = word_embed_proj_dim a_ = False def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) a_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) a_ = tf.concat([input_ids, eos_tensor] , axis=1 ) a_ = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=UpperCAmelCase__ , **self.config_updates , ) a_ = prepare_opt_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ ) return config, inputs_dict def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]: a_ = TFOPTModel(config=UpperCAmelCase__ ) a_ = inputs_dict['input_ids'] a_ = input_ids[:1, :] a_ = inputs_dict['attention_mask'][:1, :] a_ = 1 # first forward pass a_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) a_ , a_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids a_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) a_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and a_ = tf.concat([input_ids, next_tokens] , axis=-1 ) a_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) a_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] a_ = model(UpperCAmelCase__ , 
attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))


@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_path = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_path)
        model = TFOPTForCausalLM.from_pretrained(model_path)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_path = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_path)
        model = TFOPTForCausalLM.from_pretrained(model_path)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_path = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_path)
        model = TFOPTForCausalLM.from_pretrained(model_path)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
697
def nor_gate(input_1: int, input_2: int) -> int:
    """
    Output of a NOR gate: 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
697
1
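The `nor_gate` row above is a convenient place to note that NOR is functionally complete: the other basic gates can all be derived from it. A minimal self-contained sketch of that derivation (the derived gate names are illustrative, not part of the sample):

def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def not_gate(a: int) -> int:
    # NOT(a) = a NOR a
    return nor_gate(a, a)


def or_gate(a: int, b: int) -> int:
    # OR(a, b) = NOT(a NOR b)
    return not_gate(nor_gate(a, b))


def and_gate(a: int, b: int) -> int:
    # AND(a, b) = NOT(a) NOR NOT(b)
    return nor_gate(not_gate(a), not_gate(b))


if __name__ == "__main__":
    # exhaustively verify the derived gates against Python's boolean operators
    for a in (0, 1):
        for b in (0, 1):
            assert or_gate(a, b) == int(a or b)
            assert and_gate(a, b) == int(a and b)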
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
697
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
697
1
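The `SegformerFeatureExtractor` row above illustrates a standard deprecation shim: subclass the replacement class, emit a `FutureWarning` on construction, and delegate everything else. A self-contained sketch of the same pattern, with hypothetical class names:

import warnings


class NewImageProcessor:
    def __init__(self, size: int = 224):
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias kept only for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        extractor = OldFeatureExtractor(size=512)
    # the shim behaves exactly like the new class, but warns once on construction
    assert extractor.size == 512
    assert any(issubclass(w.category, FutureWarning) for w in caught)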
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
697
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title (bijective base 26) to its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZY")
    701
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
697
1
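The snowflake construction in the row above replaces every segment with four shorter ones, so after k iterations the curve has 3 * 4**k segments, i.e. 3 * 4**k + 1 points. A compact self-contained check of that growth rate (the `koch_step` helper below is a re-derivation for illustration, not the sample's own function):

import numpy


def koch_step(points):
    # replace each segment [p, q] with four segments through the two
    # trisection points and the peak obtained by rotating d = (q - p) / 3 by 60 degrees
    rot60 = numpy.array(
        [[0.5, -numpy.sin(numpy.radians(60))], [numpy.sin(numpy.radians(60)), 0.5]]
    )
    out = []
    for p, q in zip(points[:-1], points[1:]):
        d = (q - p) / 3
        out += [p, p + d, p + d + rot60 @ d, p + 2 * d]
    out.append(points[-1])
    return out


triangle = [numpy.array(v, dtype=float) for v in [(0, 0), (0.5, 0.8660254), (1, 0), (0, 0)]]
points = triangle
for k in range(4):
    assert len(points) == 3 * 4**k + 1  # 3 * 4**k segments after k iterations
    points = koch_step(points)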
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
697
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
697
1
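The padding logic in the `__call__` above packs variable-length spectrograms into one batch and marks the real (non-padding) patches in `audio_mask`. A small numpy sketch of the same idea; the shapes and patch sizes below are illustrative defaults, not values taken from a real checkpoint:

import numpy as np

# three hypothetical "spectrograms" with different numbers of time frames
features = [np.random.rand(t, 128).astype(np.float32) for t in (96, 160, 48)]
patch_time = 16            # time frames per patch (patch_size[0] above)
freq_len = 128 // 16       # frequency patches per frame (feature_size // patch_size[1])

# longest example determines the padded batch shape, in whole patches
max_patches = max(int(np.ceil(f.shape[0] / patch_time)) * freq_len for f in features)
max_time = max_patches // freq_len * patch_time

padded = np.zeros((len(features), 1, max_time, 128), dtype=np.float32)
mask = np.zeros((len(features), max_patches), dtype=np.float32)
for i, f in enumerate(features):
    padded[i, 0, : f.shape[0]] = f                                   # copy real frames
    mask[i, : int(np.ceil(f.shape[0] / patch_time)) * freq_len] = 1.0  # flag real patches

assert padded.shape == (3, 1, max_time, 128)
assert mask.sum(axis=1).max() == max_patches  # the longest clip has no padding patches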